if ($opt{e})
{
- my @plan = map { "$_->[0]\n" } @{$dbi->selectall_arrayref("explain $sql")};
+ my @plan =
+ map { "$_->[0]\n" } @{ $dbi->selectall_arrayref("explain $sql") };
print @plan;
}
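# Reviewer sketch: an expanded, equivalent spelling of the map above,
# assuming $dbi is a connected DBI handle and that each row returned by
# EXPLAIN carries one line of plan text in its first column.
my $rows = $dbi->selectall_arrayref("explain $sql");
my @plan_lines;
foreach my $row (@$rows)
{
	push @plan_lines, $row->[0] . "\n";    # first column = one plan line
}
print @plan_lines;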
{
$catalog{natts} = $1;
}
- elsif (/^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/)
+ elsif (
+ /^DATA\(insert(\s+OID\s+=\s+(\d+))?\s+\(\s*(.*)\s*\)\s*\)$/)
{
- check_natts($filename, $catalog{natts}, $3,
- $input_file, $input_line_number);
+ check_natts($filename, $catalog{natts}, $3, $input_file,
+ $input_line_number);
push @{ $catalog{data} }, { oid => $2, bki_values => $3 };
}
{
my ($catname, $natts, $bki_val, $file, $line) = @_;
- die "Could not find definition for Natts_${catname} before start of DATA() in $file\n"
- unless defined $natts;
+ die
+"Could not find definition for Natts_${catname} before start of DATA() in $file\n"
+ unless defined $natts;
my $nfields = scalar(SplitDataLine($bki_val));
die sprintf
- "Wrong number of attributes in DATA() entry at %s:%d (expected %d but got %d)\n",
- $file, $line, $natts, $nfields
+"Wrong number of attributes in DATA() entry at %s:%d (expected %d but got %d)\n",
+ $file, $line, $natts, $nfields
unless $natts == $nfields;
}
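# Reviewer sketch (hypothetical helper, not the real Catalog::SplitDataLine):
# the checks above only need a tokenizer that splits a DATA() payload on
# whitespace while keeping double-quoted strings whole, roughly:
sub split_data_line_sketch
{
	my $line = shift;
	my @fields;
	push @fields, $1 while ($line =~ /\G\s*("[^"]*"|\S+)/g);
	return @fields;    # in scalar context this yields the field count
}
# which is why scalar(SplitDataLine($bki_val)) above counts attributes.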
# Split line into tokens without interpreting their meaning.
my %bki_values;
- @bki_values{@attnames} = Catalog::SplitDataLine($row->{bki_values});
+ @bki_values{@attnames} =
+ Catalog::SplitDataLine($row->{bki_values});
# Perform required substitutions on fields
foreach my $att (keys %bki_values)
{
+
# Substitute constant values we acquired above.
# (It's intentional that this can apply to parts of a field).
$bki_values{$att} =~ s/\bPGUID\b/$BOOTSTRAP_SUPERUSERID/g;
# just do nothing (regprocin will complain).
if ($bki_attr{$att}->{type} eq 'regproc')
{
- my $procoid = $regprocoids{$bki_values{$att}};
+ my $procoid = $regprocoids{ $bki_values{$att} };
$bki_values{$att} = $procoid
- if defined($procoid) && $procoid ne 'MULTIPLE';
+ if defined($procoid) && $procoid ne 'MULTIPLE';
}
}
# This relies on the order we process the files in!
if ($catname eq 'pg_proc')
{
- if (defined($regprocoids{$bki_values{proname}}))
+ if (defined($regprocoids{ $bki_values{proname} }))
{
- $regprocoids{$bki_values{proname}} = 'MULTIPLE';
+ $regprocoids{ $bki_values{proname} } = 'MULTIPLE';
}
else
{
- $regprocoids{$bki_values{proname}} = $row->{oid};
+ $regprocoids{ $bki_values{proname} } = $row->{oid};
}
}
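# Reviewer note: %regprocoids maps a pg_proc name to its OID, or to the
# sentinel 'MULTIPLE' once the name has been seen twice, so overloaded names
# are never substituted into regproc fields.  A minimal illustration with
# made-up OIDs:
my %oids_demo;
foreach my $pair ([ abs => 200 ], [ abs => 201 ], [ pi => 300 ])
{
	my ($name, $oid) = @$pair;
	$oids_demo{$name} = defined $oids_demo{$name} ? 'MULTIPLE' : $oid;
}
# Now $oids_demo{pi} == 300, while $oids_demo{abs} eq 'MULTIPLE' and is
# left for regprocin to resolve (or complain about) at bootstrap time.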
printf $bki "insert %s( %s )\n", $oid,
join(' ', @bki_values{@attnames});
- # Write comments to postgres.description and postgres.shdescription
+ # Write comments to postgres.description and postgres.shdescription
if (defined $row->{descr})
{
printf $descr "%s\t%s\t0\t%s\n", $row->{oid}, $catname,
my $row = shift;
my @attnames = @_;
my $oid = $row->{oid} ? "OID = $row->{oid} " : '';
- my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_}, @attnames;
+ my $bki_values = join ' ', map { $_ eq '' ? '""' : $_ } map $row->{$_},
+ @attnames;
printf $bki "insert %s( %s )\n", $oid, $bki_values;
}
$row->{attidentity} ||= '\0';
# Supply appropriate quoting for these fields.
- $row->{attname} = q|{"| . $row->{attname} . q|"}|;
- $row->{attstorage} = q|'| . $row->{attstorage} . q|'|;
- $row->{attalign} = q|'| . $row->{attalign} . q|'|;
+ $row->{attname} = q|{"| . $row->{attname} . q|"}|;
+ $row->{attstorage} = q|'| . $row->{attstorage} . q|'|;
+ $row->{attalign} = q|'| . $row->{attalign} . q|'|;
$row->{attidentity} = q|'| . $row->{attidentity} . q|'|;
# We don't emit initializers for the variable length fields at all.
# Now read in kwlist.h
-open(my $kwlist, '<', $kwlist_filename) || die("Could not open : $kwlist_filename");
+open(my $kwlist, '<', $kwlist_filename)
+  || die("Could not open $kwlist_filename: $!");
my $prevkwstring = '';
my $bare_kwname;
my $data = $catalogs->{pg_proc}->{data};
foreach my $row (@$data)
{
+
# Split line into tokens without interpreting their meaning.
my %bki_values;
@bki_values{@attnames} = Catalog::SplitDataLine($row->{bki_values});
}
# Emit headers for both files
-my $tmpext = ".tmp$$";
-my $oidsfile = $output_path . 'fmgroids.h';
+my $tmpext = ".tmp$$";
+my $oidsfile = $output_path . 'fmgroids.h';
my $protosfile = $output_path . 'fmgrprotos.h';
-my $tabfile = $output_path . 'fmgrtab.c';
+my $tabfile = $output_path . 'fmgrtab.c';
-open my $ofh, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
-open my $pfh, '>', $protosfile . $tmpext or die "Could not open $protosfile$tmpext: $!";
-open my $tfh, '>', $tabfile . $tmpext or die "Could not open $tabfile$tmpext: $!";
+open my $ofh, '>', $oidsfile . $tmpext
+ or die "Could not open $oidsfile$tmpext: $!";
+open my $pfh, '>', $protosfile . $tmpext
+ or die "Could not open $protosfile$tmpext: $!";
+open my $tfh, '>', $tabfile . $tmpext
+ or die "Could not open $tabfile$tmpext: $!";
print $ofh
qq|/*-------------------------------------------------------------------------
close($tfh);
# Finally, rename the completed files into place.
-Catalog::RenameTempFile($oidsfile, $tmpext);
+Catalog::RenameTempFile($oidsfile, $tmpext);
Catalog::RenameTempFile($protosfile, $tmpext);
-Catalog::RenameTempFile($tabfile, $tmpext);
+Catalog::RenameTempFile($tabfile, $tmpext);
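# Reviewer sketch: the pattern above -- write to "$file$tmpext", then rename
# into place -- is assumed to keep an interrupted build from leaving a
# half-written header behind, i.e. roughly:
sub rename_temp_file_sketch
{
	my ($file, $tmpext) = @_;
	rename($file . $tmpext, $file)
	  or die "rename of $file$tmpext to $file failed: $!";
}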
sub usage
{
# Load CP950.TXT
my $cp950txt = &read_source("CP950.TXT");
-foreach my $i (@$cp950txt) {
+foreach my $i (@$cp950txt)
+{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
# Pick only the ETEN extended characters in the range 0xf9d6 - 0xf9dc
# from CP950.TXT
&& $code >= 0xf9d6
&& $code <= 0xf9dc)
{
- push @$all, {code => $code,
- ucs => $ucs,
- comment => $i->{comment},
- direction => BOTH,
- f => $i->{f},
- l => $i->{l} };
+ push @$all,
+ { code => $code,
+ ucs => $ucs,
+ comment => $i->{comment},
+ direction => BOTH,
+ f => $i->{f},
+ l => $i->{l} };
}
}
-foreach my $i (@$all) {
+foreach my $i (@$all)
+{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
- # BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
- # contain only one of them. XXX: Doesn't really make sense to include any of them,
- # but for historical reasons, we map the first one of them.
+# BIG5.TXT maps several BIG5 characters to U+FFFD. The UTF-8 to BIG5 mapping can
+# contain only one of them. XXX: Doesn't really make sense to include any of them,
+# but for historical reasons, we map the first one of them.
if ($i->{ucs} == 0xFFFD && $i->{code} != 0xA15A)
{
$i->{direction} = TO_UNICODE;
# a lot of extra characters on top of the GB2312 character set that
# EUC_CN encodes. Filter out those extra characters.
next if (($code & 0xFF) < 0xA1);
- next if (!($code >= 0xA100 && $code <= 0xA9FF ||
- $code >= 0xB000 && $code <= 0xF7FF));
+ next
+ if (
+ !( $code >= 0xA100 && $code <= 0xA9FF
+ || $code >= 0xB000 && $code <= 0xF7FF));
next if ($code >= 0xA2A1 && $code <= 0xA2B0);
next if ($code >= 0xA2E3 && $code <= 0xA2E4);
$ucs = 0x2015;
}
- push @mapping, {
- ucs => $ucs,
- code => $code,
+ push @mapping,
+ { ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
- };
+ f => $in_file,
+ l => $. };
}
close($in);
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
+
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
- push @all, { direction => BOTH,
- ucs => $ucs1,
- ucs_second => $ucs2,
- code => $code,
- comment => $rest,
- f => $in_file,
- l => $.
- };
+ push @all,
+ { direction => BOTH,
+ ucs => $ucs1,
+ ucs_second => $ucs2,
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
+
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
next if ($code < 0x80 && $ucs < 0x80);
- push @all, { direction => BOTH,
- ucs => $ucs,
- code => $code,
- comment => $rest,
- f => $in_file,
- l => $.
- };
+ push @all,
+ { direction => BOTH,
+ ucs => $ucs,
+ code => $code,
+ comment => $rest,
+ f => $in_file,
+ l => $. };
}
}
close($in);
my @mapping;
-foreach my $i (@$jis0212) {
+foreach my $i (@$jis0212)
+{
+
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
if ($i->{code} == 0x2243)
{
# Load CP932.TXT.
my $ct932 = &read_source("CP932.TXT");
-foreach my $i (@$ct932) {
+foreach my $i (@$ct932)
+{
my $sjis = $i->{code};
# We have a different mapping for this in the EUC_JP to UTF-8 direction.
- if ($sjis == 0xeefa ||
- $sjis == 0xeefb ||
- $sjis == 0xeefc)
+ if ( $sjis == 0xeefa
+ || $sjis == 0xeefb
+ || $sjis == 0xeefc)
{
next;
}
{
my $jis = &sjis2jis($sjis);
- $i->{code} = $jis | ($jis < 0x100 ? 0x8e00 :
- ($sjis >= 0xeffd ? 0x8f8080 : 0x8080));
+ $i->{code} = $jis | (
+ $jis < 0x100
+ ? 0x8e00
+ : ($sjis >= 0xeffd ? 0x8f8080 : 0x8080));
# Remember the SJIS code for later.
$i->{sjis} = $sjis;
}
}
-foreach my $i (@mapping) {
+foreach my $i (@mapping)
+{
my $sjis = $i->{sjis};
# These SJIS characters are excluded completely.
- if ($sjis >= 0xed00 && $sjis <= 0xeef9 ||
- $sjis >= 0xfa54 && $sjis <= 0xfa56 ||
- $sjis >= 0xfa58 && $sjis <= 0xfc4b)
+ if ( $sjis >= 0xed00 && $sjis <= 0xeef9
+ || $sjis >= 0xfa54 && $sjis <= 0xfa56
+ || $sjis >= 0xfa58 && $sjis <= 0xfc4b)
{
$i->{direction} = NONE;
next;
next;
}
- if ($sjis == 0x8790 || $sjis == 0x8791 || $sjis == 0x8792 ||
- $sjis == 0x8795 || $sjis == 0x8796 || $sjis == 0x8797 ||
- $sjis == 0x879a || $sjis == 0x879b || $sjis == 0x879c ||
- ($sjis >= 0xfa4a && $sjis <= 0xfa53))
+ if ( $sjis == 0x8790
+ || $sjis == 0x8791
+ || $sjis == 0x8792
+ || $sjis == 0x8795
+ || $sjis == 0x8796
+ || $sjis == 0x8797
+ || $sjis == 0x879a
+ || $sjis == 0x879b
+ || $sjis == 0x879c
+ || ($sjis >= 0xfa4a && $sjis <= 0xfa53))
{
$i->{direction} = TO_UNICODE;
next;
}
push @mapping, (
- {direction => BOTH, ucs => 0x4efc, code => 0x8ff4af, comment => '# CJK(4EFC)'},
- {direction => BOTH, ucs => 0x50f4, code => 0x8ff4b0, comment => '# CJK(50F4)'},
- {direction => BOTH, ucs => 0x51EC, code => 0x8ff4b1, comment => '# CJK(51EC)'},
- {direction => BOTH, ucs => 0x5307, code => 0x8ff4b2, comment => '# CJK(5307)'},
- {direction => BOTH, ucs => 0x5324, code => 0x8ff4b3, comment => '# CJK(5324)'},
- {direction => BOTH, ucs => 0x548A, code => 0x8ff4b5, comment => '# CJK(548A)'},
- {direction => BOTH, ucs => 0x5759, code => 0x8ff4b6, comment => '# CJK(5759)'},
- {direction => BOTH, ucs => 0x589E, code => 0x8ff4b9, comment => '# CJK(589E)'},
- {direction => BOTH, ucs => 0x5BEC, code => 0x8ff4ba, comment => '# CJK(5BEC)'},
- {direction => BOTH, ucs => 0x5CF5, code => 0x8ff4bb, comment => '# CJK(5CF5)'},
- {direction => BOTH, ucs => 0x5D53, code => 0x8ff4bc, comment => '# CJK(5D53)'},
- {direction => BOTH, ucs => 0x5FB7, code => 0x8ff4be, comment => '# CJK(5FB7)'},
- {direction => BOTH, ucs => 0x6085, code => 0x8ff4bf, comment => '# CJK(6085)'},
- {direction => BOTH, ucs => 0x6120, code => 0x8ff4c0, comment => '# CJK(6120)'},
- {direction => BOTH, ucs => 0x654E, code => 0x8ff4c1, comment => '# CJK(654E)'},
- {direction => BOTH, ucs => 0x663B, code => 0x8ff4c2, comment => '# CJK(663B)'},
- {direction => BOTH, ucs => 0x6665, code => 0x8ff4c3, comment => '# CJK(6665)'},
- {direction => BOTH, ucs => 0x6801, code => 0x8ff4c6, comment => '# CJK(6801)'},
- {direction => BOTH, ucs => 0x6A6B, code => 0x8ff4c9, comment => '# CJK(6A6B)'},
- {direction => BOTH, ucs => 0x6AE2, code => 0x8ff4ca, comment => '# CJK(6AE2)'},
- {direction => BOTH, ucs => 0x6DF2, code => 0x8ff4cc, comment => '# CJK(6DF2)'},
- {direction => BOTH, ucs => 0x6DF8, code => 0x8ff4cb, comment => '# CJK(6DF8)'},
- {direction => BOTH, ucs => 0x7028, code => 0x8ff4cd, comment => '# CJK(7028)'},
- {direction => BOTH, ucs => 0x70BB, code => 0x8ff4ae, comment => '# CJK(70BB)'},
- {direction => BOTH, ucs => 0x7501, code => 0x8ff4d0, comment => '# CJK(7501)'},
- {direction => BOTH, ucs => 0x7682, code => 0x8ff4d1, comment => '# CJK(7682)'},
- {direction => BOTH, ucs => 0x769E, code => 0x8ff4d2, comment => '# CJK(769E)'},
- {direction => BOTH, ucs => 0x7930, code => 0x8ff4d4, comment => '# CJK(7930)'},
- {direction => BOTH, ucs => 0x7AE7, code => 0x8ff4d9, comment => '# CJK(7AE7)'},
- {direction => BOTH, ucs => 0x7DA0, code => 0x8ff4dc, comment => '# CJK(7DA0)'},
- {direction => BOTH, ucs => 0x7DD6, code => 0x8ff4dd, comment => '# CJK(7DD6)'},
- {direction => BOTH, ucs => 0x8362, code => 0x8ff4df, comment => '# CJK(8362)'},
- {direction => BOTH, ucs => 0x85B0, code => 0x8ff4e1, comment => '# CJK(85B0)'},
- {direction => BOTH, ucs => 0x8807, code => 0x8ff4e4, comment => '# CJK(8807)'},
- {direction => BOTH, ucs => 0x8B7F, code => 0x8ff4e6, comment => '# CJK(8B7F)'},
- {direction => BOTH, ucs => 0x8CF4, code => 0x8ff4e7, comment => '# CJK(8CF4)'},
- {direction => BOTH, ucs => 0x8D76, code => 0x8ff4e8, comment => '# CJK(8D76)'},
- {direction => BOTH, ucs => 0x90DE, code => 0x8ff4ec, comment => '# CJK(90DE)'},
- {direction => BOTH, ucs => 0x9115, code => 0x8ff4ee, comment => '# CJK(9115)'},
- {direction => BOTH, ucs => 0x9592, code => 0x8ff4f1, comment => '# CJK(9592)'},
- {direction => BOTH, ucs => 0x973B, code => 0x8ff4f4, comment => '# CJK(973B)'},
- {direction => BOTH, ucs => 0x974D, code => 0x8ff4f5, comment => '# CJK(974D)'},
- {direction => BOTH, ucs => 0x9751, code => 0x8ff4f6, comment => '# CJK(9751)'},
- {direction => BOTH, ucs => 0x999E, code => 0x8ff4fa, comment => '# CJK(999E)'},
- {direction => BOTH, ucs => 0x9AD9, code => 0x8ff4fb, comment => '# CJK(9AD9)'},
- {direction => BOTH, ucs => 0x9B72, code => 0x8ff4fc, comment => '# CJK(9B72)'},
- {direction => BOTH, ucs => 0x9ED1, code => 0x8ff4fe, comment => '# CJK(9ED1)'},
- {direction => BOTH, ucs => 0xF929, code => 0x8ff4c5, comment => '# CJK COMPATIBILITY IDEOGRAPH-F929'},
- {direction => BOTH, ucs => 0xF9DC, code => 0x8ff4f2, comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC'},
- {direction => BOTH, ucs => 0xFA0E, code => 0x8ff4b4, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E'},
- {direction => BOTH, ucs => 0xFA0F, code => 0x8ff4b7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F'},
- {direction => BOTH, ucs => 0xFA10, code => 0x8ff4b8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10'},
- {direction => BOTH, ucs => 0xFA11, code => 0x8ff4bd, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11'},
- {direction => BOTH, ucs => 0xFA12, code => 0x8ff4c4, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12'},
- {direction => BOTH, ucs => 0xFA13, code => 0x8ff4c7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13'},
- {direction => BOTH, ucs => 0xFA14, code => 0x8ff4c8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14'},
- {direction => BOTH, ucs => 0xFA15, code => 0x8ff4ce, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15'},
- {direction => BOTH, ucs => 0xFA16, code => 0x8ff4cf, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16'},
- {direction => BOTH, ucs => 0xFA17, code => 0x8ff4d3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17'},
- {direction => BOTH, ucs => 0xFA18, code => 0x8ff4d5, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18'},
- {direction => BOTH, ucs => 0xFA19, code => 0x8ff4d6, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19'},
- {direction => BOTH, ucs => 0xFA1A, code => 0x8ff4d7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A'},
- {direction => BOTH, ucs => 0xFA1B, code => 0x8ff4d8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B'},
- {direction => BOTH, ucs => 0xFA1C, code => 0x8ff4da, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C'},
- {direction => BOTH, ucs => 0xFA1D, code => 0x8ff4db, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D'},
- {direction => BOTH, ucs => 0xFA1E, code => 0x8ff4de, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E'},
- {direction => BOTH, ucs => 0xFA1F, code => 0x8ff4e0, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F'},
- {direction => BOTH, ucs => 0xFA20, code => 0x8ff4e2, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20'},
- {direction => BOTH, ucs => 0xFA21, code => 0x8ff4e3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21'},
- {direction => BOTH, ucs => 0xFA22, code => 0x8ff4e5, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22'},
- {direction => BOTH, ucs => 0xFA23, code => 0x8ff4e9, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23'},
- {direction => BOTH, ucs => 0xFA24, code => 0x8ff4ea, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24'},
- {direction => BOTH, ucs => 0xFA25, code => 0x8ff4eb, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25'},
- {direction => BOTH, ucs => 0xFA26, code => 0x8ff4ed, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26'},
- {direction => BOTH, ucs => 0xFA27, code => 0x8ff4ef, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27'},
- {direction => BOTH, ucs => 0xFA28, code => 0x8ff4f0, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28'},
- {direction => BOTH, ucs => 0xFA29, code => 0x8ff4f3, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29'},
- {direction => BOTH, ucs => 0xFA2A, code => 0x8ff4f7, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A'},
- {direction => BOTH, ucs => 0xFA2B, code => 0x8ff4f8, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B'},
- {direction => BOTH, ucs => 0xFA2C, code => 0x8ff4f9, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C'},
- {direction => BOTH, ucs => 0xFA2D, code => 0x8ff4fd, comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D'},
- {direction => BOTH, ucs => 0xFF07, code => 0x8ff4a9, comment => '# FULLWIDTH APOSTROPHE'},
- {direction => BOTH, ucs => 0xFFE4, code => 0x8fa2c3, comment => '# FULLWIDTH BROKEN BAR'},
-
- # additional conversions for EUC_JP -> UTF-8 conversion
- {direction => TO_UNICODE, ucs => 0x2116, code => 0x8ff4ac, comment => '# NUMERO SIGN'},
- {direction => TO_UNICODE, ucs => 0x2121, code => 0x8ff4ad, comment => '# TELEPHONE SIGN'},
- {direction => TO_UNICODE, ucs => 0x3231, code => 0x8ff4ab, comment => '# PARENTHESIZED IDEOGRAPH STOCK'}
- );
+ { direction => BOTH,
+ ucs => 0x4efc,
+ code => 0x8ff4af,
+ comment => '# CJK(4EFC)' },
+ { direction => BOTH,
+ ucs => 0x50f4,
+ code => 0x8ff4b0,
+ comment => '# CJK(50F4)' },
+ { direction => BOTH,
+ ucs => 0x51EC,
+ code => 0x8ff4b1,
+ comment => '# CJK(51EC)' },
+ { direction => BOTH,
+ ucs => 0x5307,
+ code => 0x8ff4b2,
+ comment => '# CJK(5307)' },
+ { direction => BOTH,
+ ucs => 0x5324,
+ code => 0x8ff4b3,
+ comment => '# CJK(5324)' },
+ { direction => BOTH,
+ ucs => 0x548A,
+ code => 0x8ff4b5,
+ comment => '# CJK(548A)' },
+ { direction => BOTH,
+ ucs => 0x5759,
+ code => 0x8ff4b6,
+ comment => '# CJK(5759)' },
+ { direction => BOTH,
+ ucs => 0x589E,
+ code => 0x8ff4b9,
+ comment => '# CJK(589E)' },
+ { direction => BOTH,
+ ucs => 0x5BEC,
+ code => 0x8ff4ba,
+ comment => '# CJK(5BEC)' },
+ { direction => BOTH,
+ ucs => 0x5CF5,
+ code => 0x8ff4bb,
+ comment => '# CJK(5CF5)' },
+ { direction => BOTH,
+ ucs => 0x5D53,
+ code => 0x8ff4bc,
+ comment => '# CJK(5D53)' },
+ { direction => BOTH,
+ ucs => 0x5FB7,
+ code => 0x8ff4be,
+ comment => '# CJK(5FB7)' },
+ { direction => BOTH,
+ ucs => 0x6085,
+ code => 0x8ff4bf,
+ comment => '# CJK(6085)' },
+ { direction => BOTH,
+ ucs => 0x6120,
+ code => 0x8ff4c0,
+ comment => '# CJK(6120)' },
+ { direction => BOTH,
+ ucs => 0x654E,
+ code => 0x8ff4c1,
+ comment => '# CJK(654E)' },
+ { direction => BOTH,
+ ucs => 0x663B,
+ code => 0x8ff4c2,
+ comment => '# CJK(663B)' },
+ { direction => BOTH,
+ ucs => 0x6665,
+ code => 0x8ff4c3,
+ comment => '# CJK(6665)' },
+ { direction => BOTH,
+ ucs => 0x6801,
+ code => 0x8ff4c6,
+ comment => '# CJK(6801)' },
+ { direction => BOTH,
+ ucs => 0x6A6B,
+ code => 0x8ff4c9,
+ comment => '# CJK(6A6B)' },
+ { direction => BOTH,
+ ucs => 0x6AE2,
+ code => 0x8ff4ca,
+ comment => '# CJK(6AE2)' },
+ { direction => BOTH,
+ ucs => 0x6DF2,
+ code => 0x8ff4cc,
+ comment => '# CJK(6DF2)' },
+ { direction => BOTH,
+ ucs => 0x6DF8,
+ code => 0x8ff4cb,
+ comment => '# CJK(6DF8)' },
+ { direction => BOTH,
+ ucs => 0x7028,
+ code => 0x8ff4cd,
+ comment => '# CJK(7028)' },
+ { direction => BOTH,
+ ucs => 0x70BB,
+ code => 0x8ff4ae,
+ comment => '# CJK(70BB)' },
+ { direction => BOTH,
+ ucs => 0x7501,
+ code => 0x8ff4d0,
+ comment => '# CJK(7501)' },
+ { direction => BOTH,
+ ucs => 0x7682,
+ code => 0x8ff4d1,
+ comment => '# CJK(7682)' },
+ { direction => BOTH,
+ ucs => 0x769E,
+ code => 0x8ff4d2,
+ comment => '# CJK(769E)' },
+ { direction => BOTH,
+ ucs => 0x7930,
+ code => 0x8ff4d4,
+ comment => '# CJK(7930)' },
+ { direction => BOTH,
+ ucs => 0x7AE7,
+ code => 0x8ff4d9,
+ comment => '# CJK(7AE7)' },
+ { direction => BOTH,
+ ucs => 0x7DA0,
+ code => 0x8ff4dc,
+ comment => '# CJK(7DA0)' },
+ { direction => BOTH,
+ ucs => 0x7DD6,
+ code => 0x8ff4dd,
+ comment => '# CJK(7DD6)' },
+ { direction => BOTH,
+ ucs => 0x8362,
+ code => 0x8ff4df,
+ comment => '# CJK(8362)' },
+ { direction => BOTH,
+ ucs => 0x85B0,
+ code => 0x8ff4e1,
+ comment => '# CJK(85B0)' },
+ { direction => BOTH,
+ ucs => 0x8807,
+ code => 0x8ff4e4,
+ comment => '# CJK(8807)' },
+ { direction => BOTH,
+ ucs => 0x8B7F,
+ code => 0x8ff4e6,
+ comment => '# CJK(8B7F)' },
+ { direction => BOTH,
+ ucs => 0x8CF4,
+ code => 0x8ff4e7,
+ comment => '# CJK(8CF4)' },
+ { direction => BOTH,
+ ucs => 0x8D76,
+ code => 0x8ff4e8,
+ comment => '# CJK(8D76)' },
+ { direction => BOTH,
+ ucs => 0x90DE,
+ code => 0x8ff4ec,
+ comment => '# CJK(90DE)' },
+ { direction => BOTH,
+ ucs => 0x9115,
+ code => 0x8ff4ee,
+ comment => '# CJK(9115)' },
+ { direction => BOTH,
+ ucs => 0x9592,
+ code => 0x8ff4f1,
+ comment => '# CJK(9592)' },
+ { direction => BOTH,
+ ucs => 0x973B,
+ code => 0x8ff4f4,
+ comment => '# CJK(973B)' },
+ { direction => BOTH,
+ ucs => 0x974D,
+ code => 0x8ff4f5,
+ comment => '# CJK(974D)' },
+ { direction => BOTH,
+ ucs => 0x9751,
+ code => 0x8ff4f6,
+ comment => '# CJK(9751)' },
+ { direction => BOTH,
+ ucs => 0x999E,
+ code => 0x8ff4fa,
+ comment => '# CJK(999E)' },
+ { direction => BOTH,
+ ucs => 0x9AD9,
+ code => 0x8ff4fb,
+ comment => '# CJK(9AD9)' },
+ { direction => BOTH,
+ ucs => 0x9B72,
+ code => 0x8ff4fc,
+ comment => '# CJK(9B72)' },
+ { direction => BOTH,
+ ucs => 0x9ED1,
+ code => 0x8ff4fe,
+ comment => '# CJK(9ED1)' },
+ { direction => BOTH,
+ ucs => 0xF929,
+ code => 0x8ff4c5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F929' },
+ { direction => BOTH,
+ ucs => 0xF9DC,
+ code => 0x8ff4f2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-F9DC' },
+ { direction => BOTH,
+ ucs => 0xFA0E,
+ code => 0x8ff4b4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0E' },
+ { direction => BOTH,
+ ucs => 0xFA0F,
+ code => 0x8ff4b7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA0F' },
+ { direction => BOTH,
+ ucs => 0xFA10,
+ code => 0x8ff4b8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA10' },
+ { direction => BOTH,
+ ucs => 0xFA11,
+ code => 0x8ff4bd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA11' },
+ { direction => BOTH,
+ ucs => 0xFA12,
+ code => 0x8ff4c4,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA12' },
+ { direction => BOTH,
+ ucs => 0xFA13,
+ code => 0x8ff4c7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA13' },
+ { direction => BOTH,
+ ucs => 0xFA14,
+ code => 0x8ff4c8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA14' },
+ { direction => BOTH,
+ ucs => 0xFA15,
+ code => 0x8ff4ce,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA15' },
+ { direction => BOTH,
+ ucs => 0xFA16,
+ code => 0x8ff4cf,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA16' },
+ { direction => BOTH,
+ ucs => 0xFA17,
+ code => 0x8ff4d3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA17' },
+ { direction => BOTH,
+ ucs => 0xFA18,
+ code => 0x8ff4d5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA18' },
+ { direction => BOTH,
+ ucs => 0xFA19,
+ code => 0x8ff4d6,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA19' },
+ { direction => BOTH,
+ ucs => 0xFA1A,
+ code => 0x8ff4d7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1A' },
+ { direction => BOTH,
+ ucs => 0xFA1B,
+ code => 0x8ff4d8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1B' },
+ { direction => BOTH,
+ ucs => 0xFA1C,
+ code => 0x8ff4da,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1C' },
+ { direction => BOTH,
+ ucs => 0xFA1D,
+ code => 0x8ff4db,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1D' },
+ { direction => BOTH,
+ ucs => 0xFA1E,
+ code => 0x8ff4de,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1E' },
+ { direction => BOTH,
+ ucs => 0xFA1F,
+ code => 0x8ff4e0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA1F' },
+ { direction => BOTH,
+ ucs => 0xFA20,
+ code => 0x8ff4e2,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA20' },
+ { direction => BOTH,
+ ucs => 0xFA21,
+ code => 0x8ff4e3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA21' },
+ { direction => BOTH,
+ ucs => 0xFA22,
+ code => 0x8ff4e5,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA22' },
+ { direction => BOTH,
+ ucs => 0xFA23,
+ code => 0x8ff4e9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA23' },
+ { direction => BOTH,
+ ucs => 0xFA24,
+ code => 0x8ff4ea,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA24' },
+ { direction => BOTH,
+ ucs => 0xFA25,
+ code => 0x8ff4eb,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA25' },
+ { direction => BOTH,
+ ucs => 0xFA26,
+ code => 0x8ff4ed,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA26' },
+ { direction => BOTH,
+ ucs => 0xFA27,
+ code => 0x8ff4ef,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA27' },
+ { direction => BOTH,
+ ucs => 0xFA28,
+ code => 0x8ff4f0,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA28' },
+ { direction => BOTH,
+ ucs => 0xFA29,
+ code => 0x8ff4f3,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA29' },
+ { direction => BOTH,
+ ucs => 0xFA2A,
+ code => 0x8ff4f7,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2A' },
+ { direction => BOTH,
+ ucs => 0xFA2B,
+ code => 0x8ff4f8,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2B' },
+ { direction => BOTH,
+ ucs => 0xFA2C,
+ code => 0x8ff4f9,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2C' },
+ { direction => BOTH,
+ ucs => 0xFA2D,
+ code => 0x8ff4fd,
+ comment => '# CJK COMPATIBILITY IDEOGRAPH-FA2D' },
+ { direction => BOTH,
+ ucs => 0xFF07,
+ code => 0x8ff4a9,
+ comment => '# FULLWIDTH APOSTROPHE' },
+ { direction => BOTH,
+ ucs => 0xFFE4,
+ code => 0x8fa2c3,
+ comment => '# FULLWIDTH BROKEN BAR' },
+
+ # additional conversions for EUC_JP -> UTF-8 conversion
+ { direction => TO_UNICODE,
+ ucs => 0x2116,
+ code => 0x8ff4ac,
+ comment => '# NUMERO SIGN' },
+ { direction => TO_UNICODE,
+ ucs => 0x2121,
+ code => 0x8ff4ad,
+ comment => '# TELEPHONE SIGN' },
+ { direction => TO_UNICODE,
+ ucs => 0x3231,
+ code => 0x8ff4ab,
+ comment => '# PARENTHESIZED IDEOGRAPH STOCK' });
print_conversion_tables($this_script, "EUC_JP", \@mapping);
if ($pos >= 114 * 0x5e && $pos <= 115 * 0x5e + 0x1b)
{
+
	# This region (115-ku) is out of range of the JIS code space, but
	# for convenience in generating code for EUC CODESET 3, move it to
	# the seemingly duplicate region (83-84-ku).
}
# Some extra characters that are not in KSX1001.TXT
-push @$mapping,(
- {direction => BOTH, ucs => 0x20AC, code => 0xa2e6, comment => '# EURO SIGN', f => $this_script, l => __LINE__},
- {direction => BOTH, ucs => 0x00AE, code => 0xa2e7, comment => '# REGISTERED SIGN', f => $this_script, l => __LINE__ },
- {direction => BOTH, ucs => 0x327E, code => 0xa2e8, comment => '# CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ }
- );
+push @$mapping,
+ ( { direction => BOTH,
+ ucs => 0x20AC,
+ code => 0xa2e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => BOTH,
+ ucs => 0x00AE,
+ code => 0xa2e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => BOTH,
+ ucs => 0x327E,
+ code => 0xa2e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__ });
print_conversion_tables($this_script, "EUC_KR", $mapping);
foreach my $i (@$mapping)
{
- my $ucs = $i->{ucs};
- my $code = $i->{code};
+ my $ucs = $i->{ucs};
+ my $code = $i->{code};
my $origcode = $i->{code};
my $plane = ($code & 0x1f0000) >> 16;
# Some codes are mapped twice in the EUC_TW to UTF-8 table.
if ($origcode >= 0x12121 && $origcode <= 0x20000)
{
- push @extras, {
- ucs => $i->{ucs},
- code => ($i->{code} + 0x8ea10000),
- rest => $i->{rest},
+ push @extras,
+ { ucs => $i->{ucs},
+ code => ($i->{code} + 0x8ea10000),
+ rest => $i->{rest},
direction => TO_UNICODE,
- f => $i->{f},
- l => $i->{l}
- };
+ f => $i->{f},
+ l => $i->{l} };
}
}
my $code = hex($c);
if ($code >= 0x80 && $ucs >= 0x0080)
{
- push @mapping, {
- ucs => $ucs,
- code => $code,
+ push @mapping,
+ { ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
- };
+ f => $in_file,
+ l => $. };
}
}
close($in);
my $mapping = &read_source("JOHAB.TXT");
# Some extra characters that are not in JOHAB.TXT
-push @$mapping, (
- {direction => BOTH, ucs => 0x20AC, code => 0xd9e6, comment => '# EURO SIGN', f => $this_script, l => __LINE__ },
- {direction => BOTH, ucs => 0x00AE, code => 0xd9e7, comment => '# REGISTERED SIGN', f => $this_script, l => __LINE__ },
- {direction => BOTH, ucs => 0x327E, code => 0xd9e8, comment => '# CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ }
- );
+push @$mapping,
+ ( { direction => BOTH,
+ ucs => 0x20AC,
+ code => 0xd9e6,
+ comment => '# EURO SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => BOTH,
+ ucs => 0x00AE,
+ code => 0xd9e7,
+ comment => '# REGISTERED SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => BOTH,
+ ucs => 0x327E,
+ code => 0xd9e8,
+ comment => '# CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__ });
print_conversion_tables($this_script, "JOHAB", $mapping);
{
if ($line =~ /^0x(.*)[ \t]*U\+(.*)\+(.*)[ \t]*#(.*)$/)
{
+
# combined characters
my ($c, $u1, $u2) = ($1, $2, $3);
my $rest = "U+" . $u1 . "+" . $u2 . $4;
my $ucs1 = hex($u1);
my $ucs2 = hex($u2);
- push @mapping, {
- code => $code,
- ucs => $ucs1,
+ push @mapping,
+ { code => $code,
+ ucs => $ucs1,
ucs_second => $ucs2,
- comment => $rest,
- direction => BOTH,
- f => $in_file,
- l => $.
- };
+ comment => $rest,
+ direction => BOTH,
+ f => $in_file,
+ l => $. };
}
elsif ($line =~ /^0x(.*)[ \t]*U\+(.*)[ \t]*#(.*)$/)
{
+
# non-combined characters
my ($c, $u, $rest) = ($1, $2, "U+" . $2 . $3);
my $ucs = hex($u);
$direction = BOTH;
}
- push @mapping, {
- code => $code,
- ucs => $ucs,
- comment => $rest,
+ push @mapping,
+ { code => $code,
+ ucs => $ucs,
+ comment => $rest,
direction => $direction,
- f => $in_file,
- l => $.
- };
+ f => $in_file,
+ l => $. };
}
}
close($in);
my $mapping = read_source("CP932.TXT");
# Drop these SJIS codes from the source for UTF8=>SJIS conversion
-my @reject_sjis =(
- 0xed40..0xeefc, 0x8754..0x875d, 0x878a, 0x8782,
- 0x8784, 0xfa5b, 0xfa54, 0x8790..0x8792, 0x8795..0x8797,
- 0x879a..0x879c
-);
+my @reject_sjis = (
+ 0xed40 .. 0xeefc, 0x8754 .. 0x875d, 0x878a, 0x8782,
+ 0x8784, 0xfa5b, 0xfa54, 0x8790 .. 0x8792,
+ 0x8795 .. 0x8797, 0x879a .. 0x879c);
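# Reviewer note: the ranges expand into a flat list before the scan below;
# 0xed40 .. 0xeefc alone contributes 0xeefc - 0xed40 + 1 = 445 codes, and
# grep then makes a linear pass over that list for every mapping entry --
# fine for a build-time script of this size.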
foreach my $i (@$mapping)
{
my $code = $i->{code};
- my $ucs = $i->{ucs};
+ my $ucs = $i->{ucs};
- if (grep {$code == $_} @reject_sjis)
+ if (grep { $code == $_ } @reject_sjis)
{
$i->{direction} = TO_UNICODE;
}
}
# Add these UTF8->SJIS pairs to the table.
-push @$mapping, (
- {direction => FROM_UNICODE, ucs => 0x00a2, code => 0x8191, comment => '# CENT SIGN', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x00a3, code => 0x8192, comment => '# POUND SIGN', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x00a5, code => 0x5c, comment => '# YEN SIGN', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x00ac, code => 0x81ca, comment => '# NOT SIGN', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x2016, code => 0x8161, comment => '# DOUBLE VERTICAL LINE', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x203e, code => 0x7e, comment => '# OVERLINE', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x2212, code => 0x817c, comment => '# MINUS SIGN', f => $this_script, l => __LINE__ },
- {direction => FROM_UNICODE, ucs => 0x301c, code => 0x8160, comment => '# WAVE DASH', f => $this_script, l => __LINE__ }
- );
+push @$mapping,
+ ( { direction => FROM_UNICODE,
+ ucs => 0x00a2,
+ code => 0x8191,
+ comment => '# CENT SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x00a3,
+ code => 0x8192,
+ comment => '# POUND SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x00a5,
+ code => 0x5c,
+ comment => '# YEN SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x00ac,
+ code => 0x81ca,
+ comment => '# NOT SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x2016,
+ code => 0x8161,
+ comment => '# DOUBLE VERTICAL LINE',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x203e,
+ code => 0x7e,
+ comment => '# OVERLINE',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x2212,
+ code => 0x817c,
+ comment => '# MINUS SIGN',
+ f => $this_script,
+ l => __LINE__ },
+ { direction => FROM_UNICODE,
+ ucs => 0x301c,
+ code => 0x8160,
+ comment => '# WAVE DASH',
+ f => $this_script,
+ l => __LINE__ });
print_conversion_tables($this_script, "SJIS", $mapping);
if ($code >= 0x80 && $ucs >= 0x0080)
{
- push @mapping, {
- ucs => $ucs,
- code => $code,
+ push @mapping,
+ { ucs => $ucs,
+ code => $code,
direction => BOTH,
- f => $in_file,
- l => $.
- };
+ f => $in_file,
+ l => $. };
}
}
close($in);
# One extra character that's not in the source file.
-push @mapping, { direction => BOTH, code => 0xa2e8, ucs => 0x327e, comment => 'CIRCLED HANGUL IEUNG U', f => $this_script, l => __LINE__ };
+push @mapping,
+ { direction => BOTH,
+ code => 0xa2e8,
+ ucs => 0x327e,
+ comment => 'CIRCLED HANGUL IEUNG U',
+ f => $this_script,
+ l => __LINE__ };
print_conversion_tables($this_script, "UHC", \@mapping);
use Exporter 'import';
-our @EXPORT = qw( NONE TO_UNICODE FROM_UNICODE BOTH read_source print_conversion_tables);
+our @EXPORT =
+ qw( NONE TO_UNICODE FROM_UNICODE BOTH read_source print_conversion_tables);
# Constants used in the 'direction' field of the character maps
use constant {
NONE => 0,
TO_UNICODE => 1,
FROM_UNICODE => 2,
- BOTH => 3
-};
+ BOTH => 3 };
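# Reviewer note: the values are chosen so that BOTH is the bitwise OR of
# TO_UNICODE and FROM_UNICODE, although the scripts compare with equality,
# e.g. "next if ($c->{direction} != $direction && $c->{direction} != BOTH);".
# A hedged pair of helpers one could build on that encoding (not in the
# source):
sub direction_covers_to_unicode   { return (shift() & TO_UNICODE) != 0; }
sub direction_covers_from_unicode { return (shift() & FROM_UNICODE) != 0; }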
#######################################################################
# read_source - common routine to read source file
next if (/^#/);
chop;
- next if (/^$/); # Ignore empty lines
+ next if (/^$/); # Ignore empty lines
next if (/^0x([0-9A-F]+)\s+(#.*)$/);
print STDERR "READ ERROR at line $. in $fname: $_\n";
exit;
}
- my $out = {code => hex($1),
- ucs => hex($2),
- comment => $4,
- direction => BOTH,
- f => $fname,
- l => $.
- };
+ my $out = {
+ code => hex($1),
+ ucs => hex($2),
+ comment => $4,
+ direction => BOTH,
+ f => $fname,
+ l => $. };
# Ignore pure ASCII mappings. PostgreSQL character conversion code
# never even passes these to the conversion code.
{
my ($this_script, $csname, $charset) = @_;
- print_conversion_tables_direction($this_script, $csname, FROM_UNICODE, $charset);
- print_conversion_tables_direction($this_script, $csname, TO_UNICODE, $charset);
+ print_conversion_tables_direction($this_script, $csname, FROM_UNICODE,
+ $charset);
+ print_conversion_tables_direction($this_script, $csname, TO_UNICODE,
+ $charset);
}
#############################################################################
my $tblname;
if ($direction == TO_UNICODE)
{
- $fname = lc("${csname}_to_utf8.map");
+ $fname = lc("${csname}_to_utf8.map");
$tblname = lc("${csname}_to_unicode_tree");
print "- Writing ${csname}=>UTF8 conversion table: $fname\n";
}
else
{
- $fname = lc("utf8_to_${csname}.map");
+ $fname = lc("utf8_to_${csname}.map");
$tblname = lc("${csname}_from_unicode_tree");
print "- Writing UTF8=>${csname} conversion table: $fname\n";
print $out "/* src/backend/utils/mb/Unicode/$fname */\n";
print $out "/* This file is generated by $this_script */\n\n";
- # Collect regular, non-combined, mappings, and create the radix tree from them.
+# Collect regular, non-combined, mappings, and create the radix tree from them.
my $charmap = &make_charmap($out, $charset, $direction, 0);
print_radix_table($out, $tblname, $charmap);
- # Collect combined characters, and create combined character table (if any)
+ # Collect combined characters, and create combined character table (if any)
my $charmap_combined = &make_charmap_combined($charset, $direction);
if (scalar @{$charmap_combined} > 0)
{
if ($direction == TO_UNICODE)
{
- print_to_utf8_combined_map($out, $csname,
- $charmap_combined, 1);
+ print_to_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
else
{
- print_from_utf8_combined_map($out, $csname,
- $charmap_combined, 1);
+ print_from_utf8_combined_map($out, $csname, $charmap_combined, 1);
}
}
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
- printf $out "static const pg_utf_to_local_combined ULmap${charset}_combined[ %d ] = {",
+ printf $out
+"static const pg_utf_to_local_combined ULmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
- foreach my $i (sort {$a->{utf8} <=> $b->{utf8}} @$table)
- {
+ foreach my $i (sort { $a->{utf8} <=> $b->{utf8} } @$table)
+ {
print($out ",") if (!$first);
$first = 0;
- print $out "\t/* $last_comment */" if ($verbose && $last_comment ne "");
+ print $out "\t/* $last_comment */"
+ if ($verbose && $last_comment ne "");
printf $out "\n {0x%08x, 0x%08x, 0x%04x}",
$i->{utf8}, $i->{utf8_second}, $i->{code};
my $last_comment = "";
printf $out "\n/* Combined character map */\n";
- printf $out "static const pg_local_to_utf_combined LUmap${charset}_combined[ %d ] = {",
+ printf $out
+"static const pg_local_to_utf_combined LUmap${charset}_combined[ %d ] = {",
scalar(@$table);
my $first = 1;
- foreach my $i (sort {$a->{code} <=> $b->{code}} @$table)
- {
+ foreach my $i (sort { $a->{code} <=> $b->{code} } @$table)
+ {
print($out ",") if (!$first);
$first = 0;
- print $out "\t/* $last_comment */" if ($verbose && $last_comment ne "");
+ print $out "\t/* $last_comment */"
+ if ($verbose && $last_comment ne "");
printf $out "\n {0x%04x, 0x%08x, 0x%08x}",
$i->{code}, $i->{utf8}, $i->{utf8_second};
if ($verbose >= 2)
{
$last_comment =
- sprintf("%s:%d %s", $i->{f}, $i->{l}, $i->{comment});
+ sprintf("%s:%d %s", $i->{f}, $i->{l}, $i->{comment});
}
elsif ($verbose >= 1)
{
}
elsif ($in < 0x10000)
{
- my $b1 = $in >> 8;
- my $b2 = $in & 0xff;
+ my $b1 = $in >> 8;
+ my $b2 = $in & 0xff;
$b2map{$b1}{$b2} = $out;
}
elsif ($in < 0x1000000)
{
- my $b1 = $in >> 16;
- my $b2 = ($in >> 8) & 0xff;
- my $b3 = $in & 0xff;
+ my $b1 = $in >> 16;
+ my $b2 = ($in >> 8) & 0xff;
+ my $b3 = $in & 0xff;
$b3map{$b1}{$b2}{$b3} = $out;
}
elsif ($in < 0x100000000)
{
- my $b1 = $in >> 24;
- my $b2 = ($in >> 16) & 0xff;
- my $b3 = ($in >> 8) & 0xff;
- my $b4 = $in & 0xff;
+ my $b1 = $in >> 24;
+ my $b2 = ($in >> 16) & 0xff;
+ my $b3 = ($in >> 8) & 0xff;
+ my $b4 = $in & 0xff;
$b4map{$b1}{$b2}{$b3}{$b4} = $out;
}
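# Reviewer note: the branches above shred each input into big-endian bytes,
# one hash level per byte.  Worked example for the 3-byte code 0x8ff4af:
#     0x8ff4af >> 16         == 0x8f
#     (0x8ff4af >> 8) & 0xff == 0xf4
#     0x8ff4af & 0xff        == 0xaf
# so the entry lands at $b3map{0x8f}{0xf4}{0xaf}.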
###
# Add the segments for the radix trees themselves.
- push @segments, build_segments_from_tree("Single byte table", "1-byte", 1, \%b1map);
- push @segments, build_segments_from_tree("Two byte table", "2-byte", 2, \%b2map);
- push @segments, build_segments_from_tree("Three byte table", "3-byte", 3, \%b3map);
- push @segments, build_segments_from_tree("Four byte table", "4-byte", 4, \%b4map);
+ push @segments,
+ build_segments_from_tree("Single byte table", "1-byte", 1, \%b1map);
+ push @segments,
+ build_segments_from_tree("Two byte table", "2-byte", 2, \%b2map);
+ push @segments,
+ build_segments_from_tree("Three byte table", "3-byte", 3, \%b3map);
+ push @segments,
+ build_segments_from_tree("Four byte table", "4-byte", 4, \%b4map);
###
### Find min and max index used in each level of each tree.
my %max_idx;
foreach my $seg (@segments)
{
- my $this_min = $min_idx{$seg->{depth}}->{$seg->{level}};
- my $this_max = $max_idx{$seg->{depth}}->{$seg->{level}};
+ my $this_min = $min_idx{ $seg->{depth} }->{ $seg->{level} };
+ my $this_max = $max_idx{ $seg->{depth} }->{ $seg->{level} };
- foreach my $i (keys %{$seg->{values}})
+ foreach my $i (keys %{ $seg->{values} })
{
$this_min = $i if (!defined $this_min || $i < $this_min);
$this_max = $i if (!defined $this_max || $i > $this_max);
}
- $min_idx{$seg->{depth}}{$seg->{level}} = $this_min;
- $max_idx{$seg->{depth}}{$seg->{level}} = $this_max;
+ $min_idx{ $seg->{depth} }{ $seg->{level} } = $this_min;
+ $max_idx{ $seg->{depth} }{ $seg->{level} } = $this_max;
}
+
# Copy the mins and max's back to every segment, for convenience.
foreach my $seg (@segments)
{
- $seg->{min_idx} = $min_idx{$seg->{depth}}{$seg->{level}};
- $seg->{max_idx} = $max_idx{$seg->{depth}}{$seg->{level}};
+ $seg->{min_idx} = $min_idx{ $seg->{depth} }{ $seg->{level} };
+ $seg->{max_idx} = $max_idx{ $seg->{depth} }{ $seg->{level} };
}
###
$widest_range = $this_range if ($this_range > $widest_range);
}
- unshift @segments, {
- header => "Dummy map, for invalid values",
+ unshift @segments,
+ { header => "Dummy map, for invalid values",
min_idx => 0,
- max_idx => $widest_range
- };
+ max_idx => $widest_range };
###
### Eliminate overlapping zeros
###
for (my $j = 0; $j < $#segments - 1; $j++)
{
- my $seg = $segments[$j];
- my $nextseg = $segments[$j + 1];
+ my $seg = $segments[$j];
+ my $nextseg = $segments[ $j + 1 ];
# Count the number of zero values at the end of this segment.
my $this_trail_zeros = 0;
- for (my $i = $seg->{max_idx}; $i >= $seg->{min_idx} && !$seg->{values}->{$i}; $i--)
+ for (
+ my $i = $seg->{max_idx};
+ $i >= $seg->{min_idx} && !$seg->{values}->{$i};
+ $i--)
{
$this_trail_zeros++;
}
# Count the number of zeros at the beginning of next segment.
my $next_lead_zeros = 0;
- for (my $i = $nextseg->{min_idx}; $i <= $nextseg->{max_idx} && !$nextseg->{values}->{$i}; $i++)
+ for (
+ my $i = $nextseg->{min_idx};
+ $i <= $nextseg->{max_idx} && !$nextseg->{values}->{$i};
+ $i++)
{
$next_lead_zeros++;
}
# How many zeros in common?
my $overlaid_trail_zeros =
- ($this_trail_zeros > $next_lead_zeros) ? $next_lead_zeros : $this_trail_zeros;
+ ($this_trail_zeros > $next_lead_zeros)
+ ? $next_lead_zeros
+ : $this_trail_zeros;
$seg->{overlaid_trail_zeros} = $overlaid_trail_zeros;
$seg->{max_idx} = $seg->{max_idx} - $overlaid_trail_zeros;
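# Reviewer note: $overlaid_trail_zeros is min(this_trail_zeros,
# next_lead_zeros), so if one segment's values end (5, 3, 0, 0) and the next
# segment's begin (0, 0, 0, 7), two zero slots are shared and the next
# segment starts two positions earlier in the flattened table.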
foreach my $seg (@segments)
{
$seg->{offset} = $flatoff;
- $segmap{$seg->{label}} = $flatoff;
+ $segmap{ $seg->{label} } = $flatoff;
$flatoff += $seg->{max_idx} - $seg->{min_idx} + 1;
}
my $tblsize = $flatoff;
# Second pass: look up the offset of each label reference in the hash.
foreach my $seg (@segments)
{
- while (my ($i, $val) = each %{$seg->{values}})
+ while (my ($i, $val) = each %{ $seg->{values} })
{
- if (!($val =~ /^[0-9,.E]+$/ ))
+ if (!($val =~ /^[0-9,.E]+$/))
{
my $segoff = $segmap{$val};
if ($segoff)
my $max_val = 0;
foreach my $seg (@segments)
{
- foreach my $val (values %{$seg->{values}})
+ foreach my $val (values %{ $seg->{values} })
{
$max_val = $val if ($val > $max_val);
}
if ($max_val <= 0xffff)
{
$vals_per_line = 8;
- $colwidth = 4;
+ $colwidth = 4;
}
elsif ($max_val <= 0xffffff)
{
$vals_per_line = 4;
- $colwidth = 6;
+ $colwidth = 6;
}
else
{
$vals_per_line = 4;
- $colwidth = 8;
+ $colwidth = 8;
}
###
print $out " ${tblname}_table,\n";
}
printf $out "\n";
- printf $out " 0x%04x, /* offset of table for 1-byte inputs */\n", $b1root;
+ printf $out " 0x%04x, /* offset of table for 1-byte inputs */\n",
+ $b1root;
printf $out " 0x%02x, /* b1_lower */\n", $b1_lower;
printf $out " 0x%02x, /* b1_upper */\n", $b1_upper;
printf $out "\n";
- printf $out " 0x%04x, /* offset of table for 2-byte inputs */\n", $b2root;
+ printf $out " 0x%04x, /* offset of table for 2-byte inputs */\n",
+ $b2root;
printf $out " 0x%02x, /* b2_1_lower */\n", $b2_1_lower;
printf $out " 0x%02x, /* b2_1_upper */\n", $b2_1_upper;
printf $out " 0x%02x, /* b2_2_lower */\n", $b2_2_lower;
printf $out " 0x%02x, /* b2_2_upper */\n", $b2_2_upper;
printf $out "\n";
- printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n", $b3root;
+ printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
+ $b3root;
printf $out " 0x%02x, /* b3_1_lower */\n", $b3_1_lower;
printf $out " 0x%02x, /* b3_1_upper */\n", $b3_1_upper;
printf $out " 0x%02x, /* b3_2_lower */\n", $b3_2_lower;
printf $out " 0x%02x, /* b3_3_lower */\n", $b3_3_lower;
printf $out " 0x%02x, /* b3_3_upper */\n", $b3_3_upper;
printf $out "\n";
- printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n", $b4root;
+ printf $out " 0x%04x, /* offset of table for 3-byte inputs */\n",
+ $b4root;
printf $out " 0x%02x, /* b4_1_lower */\n", $b4_1_lower;
printf $out " 0x%02x, /* b4_1_upper */\n", $b4_1_upper;
printf $out " 0x%02x, /* b4_2_lower */\n", $b4_2_lower;
print $out "static const $datatype ${tblname}_table[$tblsize] =\n";
print $out "{";
my $off = 0;
+
foreach my $seg (@segments)
{
printf $out "\n";
printf $out " /*** %s - offset 0x%05x ***/\n", $seg->{header}, $off;
printf $out "\n";
- for (my $i=$seg->{min_idx}; $i <= $seg->{max_idx};)
+ for (my $i = $seg->{min_idx}; $i <= $seg->{max_idx};)
{
+
# Print the next line's worth of values.
# XXX pad to begin at a nice boundary
printf $out " /* %02x */ ", $i;
- for (my $j = 0; $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
+ for (my $j = 0;
+ $j < $vals_per_line && $i <= $seg->{max_idx}; $j++)
{
my $val = $seg->{values}->{$i};
}
if ($seg->{overlaid_trail_zeros})
{
- printf $out " /* $seg->{overlaid_trail_zeros} trailing zero values shared with next segment */\n";
+ printf $out
+" /* $seg->{overlaid_trail_zeros} trailing zero values shared with next segment */\n";
}
}
if (%{$map})
{
- @segments = build_segments_recurse($header, $rootlabel, "", 1, $depth, $map);
+ @segments =
+ build_segments_recurse($header, $rootlabel, "", 1, $depth, $map);
# Sort the segments into "breadth-first" order. Not strictly required,
# but makes the maps nicer to read.
- @segments = sort { $a->{level} cmp $b->{level} or
- $a->{path} cmp $b->{path}}
- @segments;
+ @segments =
+ sort { $a->{level} cmp $b->{level} or $a->{path} cmp $b->{path} }
+ @segments;
}
return @segments;
if ($level == $depth)
{
- push @segments, {
- header => $header . ", leaf: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
- values => $map
- };
+ push @segments,
+ { header => $header . ", leaf: ${path}xx",
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
+ values => $map };
}
else
{
my $childpath = $path . sprintf("%02x", $i);
my $childlabel = "$depth-level-$level-$childpath";
- push @segments, build_segments_recurse($header, $childlabel, $childpath,
- $level + 1, $depth, $val);
+ push @segments,
+ build_segments_recurse($header, $childlabel, $childpath,
+ $level + 1, $depth, $val);
$children{$i} = $childlabel;
}
- push @segments, {
- header => $header . ", byte #$level: ${path}xx",
- label => $label,
- level => $level,
- depth => $depth,
- path => $path,
- values => \%children
- };
+ push @segments,
+ { header => $header . ", byte #$level: ${path}xx",
+ label => $label,
+ level => $level,
+ depth => $depth,
+ path => $path,
+ values => \%children };
}
return @segments;
}
my %charmap;
foreach my $c (@$charset)
{
+
# combined characters are handled elsewhere
next if (defined $c->{ucs_second});
next if ($c->{direction} != $direction && $c->{direction} != BOTH);
my ($src, $dst) =
- $direction == TO_UNICODE
- ? ($c->{code}, ucs2utf($c->{ucs}))
- : (ucs2utf($c->{ucs}), $c->{code});
+ $direction == TO_UNICODE
+ ? ($c->{code}, ucs2utf($c->{ucs}))
+ : (ucs2utf($c->{ucs}), $c->{code});
# check for duplicate source codes
if (defined $charmap{$src})
{
printf STDERR
- "Error: duplicate source code on %s:%d: 0x%04x => 0x%04x, 0x%04x\n",
- $c->{f}, $c->{l}, $src, $charmap{$src}, $dst;
+"Error: duplicate source code on %s:%d: 0x%04x => 0x%04x, 0x%04x\n",
+ $c->{f}, $c->{l}, $src, $charmap{$src}, $dst;
exit;
}
$charmap{$src} = $dst;
if ($verbose)
{
- printf $out "0x%04x 0x%04x %s:%d %s\n", $src, $dst, $c->{f}, $c->{l}, $c->{comment};
+ printf $out "0x%04x 0x%04x %s:%d %s\n", $src, $dst, $c->{f},
+ $c->{l}, $c->{comment};
}
}
if ($verbose)
if (defined $c->{ucs_second})
{
- my $entry = {utf8 => ucs2utf($c->{ucs}),
- utf8_second => ucs2utf($c->{ucs_second}),
- code => $c->{code},
- comment => $c->{comment},
- f => $c->{f}, l => $c->{l}};
+ my $entry = {
+ utf8 => ucs2utf($c->{ucs}),
+ utf8_second => ucs2utf($c->{ucs_second}),
+ code => $c->{code},
+ comment => $c->{comment},
+ f => $c->{f},
+ l => $c->{l} };
push @combined, $entry;
}
}
# make sure we run one successful test without a TZ setting so we test
# initdb's time zone setting code
{
+
	# 'delete local' only works from Perl 5.12, so use the older way to do this
local (%ENV) = %ENV;
delete $ENV{TZ};
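# Reviewer note: on Perl 5.12 and later the copy above could be replaced by
# the single statement
#     delete local $ENV{TZ};
# which restores TZ automatically at scope exit; the older spelling is kept
# for portability.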
[ 'pg_basebackup', '-D', "$tempdir/backup" ],
'pg_basebackup fails because of WAL configuration');
-ok(! -d "$tempdir/backup", 'backup directory was cleaned up');
+ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
+$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup", '-n' ],
'failing run with no-clean option');
ok(-d "$tempdir/backup", 'backup directory was created and left behind');
$node->restart;
# Write some files to test that they are not copied.
-foreach my $filename (qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp))
+foreach my $filename (
+ qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp)
+ )
{
open my $file, '>>', "$pgdata/$filename";
print $file "DONOTCOPY";
'no WAL files copied');
# Contents of these directories should not be copied.
-foreach my $dirname (qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans))
+foreach my $dirname (
+ qw(pg_dynshmem pg_notify pg_replslot pg_serial pg_snapshots pg_stat_tmp pg_subtrans)
+ )
{
is_deeply(
[ sort(slurp_dir("$tempdir/backup/$dirname/")) ],
}
# These files should not be copied.
-foreach my $filename (qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp))
+foreach my $filename (
+ qw(postgresql.auto.conf.tmp postmaster.opts postmaster.pid tablespace_map current_logfiles.tmp)
+ )
{
- ok(! -f "$tempdir/backup/$filename", "$filename not copied");
+ ok(!-f "$tempdir/backup/$filename", "$filename not copied");
}
# Make sure existing backup_label was ignored.
-isnt(slurp_file("$tempdir/backup/backup_label"), 'DONOTCOPY',
- 'existing backup_label not copied');
+isnt(slurp_file("$tempdir/backup/backup_label"),
+ 'DONOTCOPY', 'existing backup_label not copied');
$node->command_ok(
[ 'pg_basebackup', '-D', "$tempdir/backup2", '--waldir',
my $superlongname = "superlongname_" . ("x" x 100);
my $superlongpath = "$pgdata/$superlongname";
-open my $file, '>', "$superlongpath" or die "unable to create file $superlongpath";
+open my $file, '>', "$superlongpath"
+ or die "unable to create file $superlongpath";
close $file;
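# Reviewer note: "superlongname_" is 14 characters, so the file name built
# above is 14 + 100 = 114 characters -- deliberately past the 100-character
# name field of traditional tar headers, which is presumably what makes the
# -Ft run below fail.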
$node->command_fails(
[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
$node->stop;
rename("$pgdata/pg_replslot", "$tempdir/pg_replslot")
- or BAIL_OUT "could not move $pgdata/pg_replslot";
+ or BAIL_OUT "could not move $pgdata/pg_replslot";
symlink("$tempdir/pg_replslot", "$pgdata/pg_replslot")
- or BAIL_OUT "could not symlink to $pgdata/pg_replslot";
+ or BAIL_OUT "could not symlink to $pgdata/pg_replslot";
$node->start;
"tablespace symlink was updated");
closedir $dh;
- ok(-d "$tempdir/backup1/pg_replslot", 'pg_replslot symlink copied as directory');
+ ok( -d "$tempdir/backup1/pg_replslot",
+ 'pg_replslot symlink copied as directory');
mkdir "$tempdir/tbl=spc2";
$node->safe_psql('postgres', "DROP TABLE test1;");
qr/^primary_conninfo = '.*port=$port.*'\n/m,
'recovery.conf sets primary_conninfo');
-$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupxd" ],
+$node->command_ok(
+ [ 'pg_basebackup', '-D', "$tempdir/backupxd" ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
$node->command_ok(
- [ 'pg_basebackup', '-D', "$tempdir/backupnoslot", '-X', 'stream', '--no-slot' ],
+ [ 'pg_basebackup', '-D',
+ "$tempdir/backupnoslot", '-X',
+ 'stream', '--no-slot' ],
'pg_basebackup -X stream runs with --no-slot');
$node->command_fails(
# Initialize node without replication settings
$node->init(allows_streaming => 1, has_archiving => 1);
-$node->append_conf('postgresql.conf', q{
+$node->append_conf(
+ 'postgresql.conf', q{
wal_level = 'logical'
max_replication_slots = 4
max_wal_senders = 4
$node->dump_info;
$node->start;
-$node->command_fails(['pg_recvlogical'],
- 'pg_recvlogical needs a slot name');
-$node->command_fails(['pg_recvlogical', '-S', 'test'],
+$node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name');
+$node->command_fails([ 'pg_recvlogical', '-S', 'test' ],
'pg_recvlogical needs a database');
-$node->command_fails(['pg_recvlogical', '-S', 'test', '-d', 'postgres'],
+$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ],
'pg_recvlogical needs an action');
-$node->command_fails(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--start'],
+$node->command_fails(
+ [ 'pg_recvlogical', '-S',
+ 'test', '-d',
+ $node->connstr('postgres'), '--start' ],
'no destination file');
-$node->command_ok(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--create-slot'],
+$node->command_ok(
+ [ 'pg_recvlogical', '-S',
+ 'test', '-d',
+ $node->connstr('postgres'), '--create-slot' ],
'slot created');
my $slot = $node->slot('test');
isnt($slot->{'restart_lsn'}, '', 'restart lsn is defined for new slot');
$node->psql('postgres', 'CREATE TABLE test_table(x integer)');
-$node->psql('postgres', 'INSERT INTO test_table(x) SELECT y FROM generate_series(1, 10) a(y);');
-my $nextlsn = $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
+$node->psql('postgres',
+ 'INSERT INTO test_table(x) SELECT y FROM generate_series(1, 10) a(y);');
+my $nextlsn =
+ $node->safe_psql('postgres', 'SELECT pg_current_wal_insert_lsn()');
chomp($nextlsn);
-$node->command_ok(['pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-'],
+$node->command_ok(
+ [ 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'),
+ '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-' ],
'replayed a transaction');
'configure authentication');
open my $conf, '>>', "$tempdir/data/postgresql.conf";
print $conf "fsync = off\n";
-if (! $windows_os)
+if (!$windows_os)
{
print $conf "listen_addresses = ''\n";
print $conf "unix_socket_directories = '$tempdir_short'\n";
print $conf "listen_addresses = '127.0.0.1'\n";
}
close $conf;
-command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
- 'pg_ctl start');
+command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ], 'pg_ctl start');
# sleep here is because Windows builds can't check postmaster.pid exactly,
# so they may mistake a pre-existing postmaster.pid for one created by the
sleep 3 if ($windows_os);
command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
'second pg_ctl start fails');
-command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
- 'pg_ctl stop');
+command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop');
command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ],
'second pg_ctl stop fails');
-command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
+command_ok(
+ [ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server not running');
command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ],
'pg_ctl restart with server running');
my $tempdir = TestLib::tempdir;
-command_fails_like([ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
- qr/directory .* does not exist/,
- 'pg_ctl promote with nonexistent directory');
+command_fails_like(
+ [ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ],
+ qr/directory .* does not exist/,
+ 'pg_ctl promote with nonexistent directory');
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
-command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
- qr/PID file .* does not exist/,
- 'pg_ctl promote of not running instance fails');
+command_fails_like(
+ [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ qr/PID file .* does not exist/,
+ 'pg_ctl promote of not running instance fails');
$node_primary->start;
-command_fails_like([ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
- qr/not in standby mode/,
- 'pg_ctl promote of primary instance fails');
+command_fails_like(
+ [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ],
+ qr/not in standby mode/,
+ 'pg_ctl promote of primary instance fails');
my $node_standby = get_new_node('standby');
$node_primary->backup('my_backup');
-$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1);
+$node_standby->init_from_backup($node_primary, 'my_backup',
+ has_streaming => 1);
$node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
- 't', 'standby is in recovery');
+ 't', 'standby is in recovery');
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ],
- 'pg_ctl -W promote of standby runs');
+ 'pg_ctl -W promote of standby runs');
-ok($node_standby->poll_query_until('postgres', 'SELECT NOT pg_is_in_recovery()'),
- 'promoted standby is not in recovery');
+ok( $node_standby->poll_query_until(
+ 'postgres', 'SELECT NOT pg_is_in_recovery()'),
+ 'promoted standby is not in recovery');
# same again with default wait option
$node_standby = get_new_node('standby2');
-$node_standby->init_from_backup($node_primary, 'my_backup', has_streaming => 1);
+$node_standby->init_from_backup($node_primary, 'my_backup',
+ has_streaming => 1);
$node_standby->start;
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
- 't', 'standby is in recovery');
+ 't', 'standby is in recovery');
command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ],
- 'pg_ctl promote of standby runs');
+ 'pg_ctl promote of standby runs');
# no wait here
is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'),
- 'f', 'promoted standby is not in recovery');
+ 'f', 'promoted standby is not in recovery');
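# pg_ctl -W returns without waiting, which is why the earlier promotion had
# to poll pg_is_in_recovery(); the default waiting mode used here lets the
# test query the recovery state directly once pg_ctl returns.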
'-d', 'postgres', # alternative way to specify database
],
restore_cmd => [
- 'pg_restore', '-Fc',
- '--verbose',
+ 'pg_restore', '-Fc', '--verbose',
"--file=$tempdir/binary_upgrade.sql",
"$tempdir/binary_upgrade.dump", ], },
clean => {
'postgres', ], },
column_inserts => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/column_inserts.sql",
- '-a',
- '--column-inserts',
- 'postgres', ], },
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/column_inserts.sql", '-a',
+ '--column-inserts', 'postgres', ], },
createdb => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/createdb.sql",
'-C',
- '-R', # no-op, just for testing
+ '-R', # no-op, just for testing
'-v',
'postgres', ], },
data_only => {
'-a',
'--superuser=test_superuser',
'--disable-triggers',
- '-v', # no-op, just make sure it works
+ '-v', # no-op, just make sure it works
'postgres', ], },
defaults => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- '-f',
- "$tempdir/defaults.sql",
+ 'pg_dump', '--no-sync',
+ '-f', "$tempdir/defaults.sql",
'postgres', ], },
+
# Do not use --no-sync to give test coverage for data sync.
defaults_custom_format => {
test_key => 'defaults',
'pg_restore', '-Fc',
"--file=$tempdir/defaults_custom_format.sql",
"$tempdir/defaults_custom_format.dump", ], },
+
# Do not use --no-sync to give test coverage for data sync.
defaults_dir_format => {
test_key => 'defaults',
'pg_restore', '-Fd',
"--file=$tempdir/defaults_dir_format.sql",
"$tempdir/defaults_dir_format", ], },
+
# Do not use --no-sync to give test coverage for data sync.
defaults_parallel => {
test_key => 'defaults',
'pg_restore',
"--file=$tempdir/defaults_parallel.sql",
"$tempdir/defaults_parallel", ], },
+
# Do not use --no-sync to give test coverage for data sync.
defaults_tar_format => {
test_key => 'defaults',
"$tempdir/defaults_tar_format.tar", ], },
exclude_dump_test_schema => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/exclude_dump_test_schema.sql",
- '--exclude-schema=dump_test',
- 'postgres', ], },
+ '--exclude-schema=dump_test', 'postgres', ], },
exclude_test_table => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/exclude_test_table.sql",
- '--exclude-table=dump_test.test_table',
- 'postgres', ], },
+ '--exclude-table=dump_test.test_table', 'postgres', ], },
exclude_test_table_data => {
dump_cmd => [
'pg_dump',
'postgres', ], },
pg_dumpall_globals => {
dump_cmd => [
- 'pg_dumpall', '-v',
- "--file=$tempdir/pg_dumpall_globals.sql", '-g',
- '--no-sync', ], },
+ 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql",
+ '-g', '--no-sync', ], },
pg_dumpall_globals_clean => {
dump_cmd => [
- 'pg_dumpall',
- "--file=$tempdir/pg_dumpall_globals_clean.sql",
- '-g',
- '-c',
- '--no-sync', ], },
+ 'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql",
+ '-g', '-c', '--no-sync', ], },
pg_dumpall_dbprivs => {
dump_cmd => [
- 'pg_dumpall',
- '--no-sync',
+ 'pg_dumpall', '--no-sync',
"--file=$tempdir/pg_dumpall_dbprivs.sql", ], },
no_blobs => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_blobs.sql",
- '-B',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_blobs.sql", '-B',
'postgres', ], },
no_privs => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_privs.sql",
- '-x',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_privs.sql", '-x',
'postgres', ], },
no_owner => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_owner.sql",
- '-O',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_owner.sql", '-O',
'postgres', ], },
only_dump_test_schema => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
+ 'pg_dump', '--no-sync',
"--file=$tempdir/only_dump_test_schema.sql",
- '--schema=dump_test',
- 'postgres', ], },
+ '--schema=dump_test', 'postgres', ], },
only_dump_test_table => {
dump_cmd => [
'pg_dump',
"$tempdir/role_parallel", ], },
schema_only => {
dump_cmd => [
- 'pg_dump', '--format=plain', "--file=$tempdir/schema_only.sql",
- '--no-sync', '-s', 'postgres', ], },
+ 'pg_dump', '--format=plain',
+ "--file=$tempdir/schema_only.sql", '--no-sync',
+ '-s', 'postgres', ], },
section_pre_data => {
dump_cmd => [
'pg_dump', "--file=$tempdir/section_pre_data.sql",
- '--section=pre-data', '--no-sync', 'postgres', ], },
+ '--section=pre-data', '--no-sync',
+ 'postgres', ], },
section_data => {
dump_cmd => [
'pg_dump', "--file=$tempdir/section_data.sql",
- '--section=data', '--no-sync', 'postgres', ], },
+ '--section=data', '--no-sync',
+ 'postgres', ], },
section_post_data => {
dump_cmd => [
- 'pg_dump', "--file=$tempdir/section_post_data.sql",
+ 'pg_dump', "--file=$tempdir/section_post_data.sql",
'--section=post-data', '--no-sync', 'postgres', ], },
test_schema_plus_blobs => {
dump_cmd => [
'--schema=dump_test', '-b', '-B', '--no-sync', 'postgres', ], },
with_oids => {
dump_cmd => [
- 'pg_dump', '--oids',
- '--no-sync',
- "--file=$tempdir/with_oids.sql", 'postgres', ], },);
+ 'pg_dump', '--oids',
+ '--no-sync', "--file=$tempdir/with_oids.sql",
+ 'postgres', ], },);
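# Each %pgdump_runs entry names a run and carries the dump_cmd (plus, for
# the non-plain-text formats, the restore_cmd) to execute verbatim; the test
# definitions below check each run's output against their like/unlike maps.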
###############################################################
# Definition of the tests to run.
all_runs => 1,
catch_all =>
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
- regexp => qr/^ALTER COLLATION test0 OWNER TO .*;/m,
+ regexp => qr/^ALTER COLLATION test0 OWNER TO .*;/m,
collation => 1,
- like => {
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
role => 1, }, },
'ALTER OPERATOR FAMILY dump_test.op_family USING btree' => {
- all_runs => 1,
+ all_runs => 1,
create_order => 75,
- create_sql => 'ALTER OPERATOR FAMILY dump_test.op_family USING btree ADD
+ create_sql =>
+ 'ALTER OPERATOR FAMILY dump_test.op_family USING btree ADD
OPERATOR 1 <(bigint,int4),
OPERATOR 2 <=(bigint,int4),
OPERATOR 3 =(bigint,int4),
section_data => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90' => {
- all_runs => 1,
- catch_all => 'ALTER TABLE ... commands',
+ all_runs => 1,
+ catch_all => 'ALTER TABLE ... commands',
create_order => 93,
- create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col1 SET STATISTICS 90;',
- regexp => qr/^
+ create_sql =>
+'ALTER TABLE dump_test.test_table ALTER COLUMN col1 SET STATISTICS 90;',
+ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col1 SET STATISTICS 90;\E\n
/xm,
like => {
section_data => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE' => {
- all_runs => 1,
- catch_all => 'ALTER TABLE ... commands',
+ all_runs => 1,
+ catch_all => 'ALTER TABLE ... commands',
create_order => 94,
- create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;',
- regexp => qr/^
+ create_sql =>
+'ALTER TABLE dump_test.test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;',
+ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col2 SET STORAGE EXTERNAL;\E\n
/xm,
like => {
only_dump_test_table => 1,
pg_dumpall_dbprivs => 1,
schema_only => 1,
- section_pre_data => 1,
+ section_pre_data => 1,
test_schema_plus_blobs => 1,
with_oids => 1, },
unlike => {
exclude_dump_test_schema => 1,
exclude_test_table => 1,
role => 1,
- section_post_data => 1,
+ section_post_data => 1,
section_data => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE' => {
- all_runs => 1,
- catch_all => 'ALTER TABLE ... commands',
+ all_runs => 1,
+ catch_all => 'ALTER TABLE ... commands',
create_order => 95,
- create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col3 SET STORAGE MAIN;',
- regexp => qr/^
+ create_sql =>
+'ALTER TABLE dump_test.test_table ALTER COLUMN col3 SET STORAGE MAIN;',
+ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col3 SET STORAGE MAIN;\E\n
/xm,
like => {
section_data => 1, }, },
'ALTER TABLE ONLY test_table ALTER COLUMN col4 SET n_distinct' => {
- all_runs => 1,
- catch_all => 'ALTER TABLE ... commands',
+ all_runs => 1,
+ catch_all => 'ALTER TABLE ... commands',
create_order => 95,
- create_sql => 'ALTER TABLE dump_test.test_table ALTER COLUMN col4 SET (n_distinct = 10);',
- regexp => qr/^
+ create_sql =>
+'ALTER TABLE dump_test.test_table ALTER COLUMN col4 SET (n_distinct = 10);',
+ regexp => qr/^
\QALTER TABLE ONLY test_table ALTER COLUMN col4 SET (n_distinct=10);\E\n
/xm,
like => {
section_post_data => 1,
section_data => 1, }, },
- 'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2' => {
- all_runs => 1,
- regexp => qr/^
+'ALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2'
+ => {
+ all_runs => 1,
+ regexp => qr/^
\QALTER TABLE ONLY dump_test.measurement ATTACH PARTITION measurement_y2006m2 \E
\QFOR VALUES FROM ('2006-02-01') TO ('2006-03-01');\E\n
/xm,
- like => {
- binary_upgrade => 1, },
+ like => { binary_upgrade => 1, },
unlike => {
clean => 1,
clean_if_exists => 1,
section_data => 1, }, },
'ALTER TABLE test_table CLUSTER ON test_table_pkey' => {
- all_runs => 1,
- catch_all => 'ALTER TABLE ... commands',
+ all_runs => 1,
+ catch_all => 'ALTER TABLE ... commands',
create_order => 96,
- create_sql => 'ALTER TABLE dump_test.test_table CLUSTER ON test_table_pkey',
- regexp => qr/^
+ create_sql =>
+ 'ALTER TABLE dump_test.test_table CLUSTER ON test_table_pkey',
+ regexp => qr/^
\QALTER TABLE test_table CLUSTER ON test_table_pkey;\E\n
/xm,
like => {
with_oids => 1, }, },
'ALTER FOREIGN TABLE foreign_table ALTER COLUMN c1 OPTIONS' => {
- all_runs => 1,
+ all_runs => 1,
catch_all => 'ALTER TABLE ... commands',
- regexp => qr/^
+ regexp => qr/^
\QALTER FOREIGN TABLE foreign_table ALTER COLUMN c1 OPTIONS (\E\n
\s+\Qcolumn_name 'col1'\E\n
\Q);\E\n
/xm,
- like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- no_owner => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ like => {
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ no_owner => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
exclude_dump_test_schema => 1,
data_only => 1,
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
regexp => qr/^ALTER TABLE measurement OWNER TO .*;/m,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
data_only => 1,
exclude_dump_test_schema => 1,
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
regexp => qr/^ALTER FOREIGN TABLE foreign_table OWNER TO .*;/m,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
exclude_dump_test_schema => 1,
data_only => 1,
all_runs => 1,
catch_all =>
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
- regexp => qr/^ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO .*;/m,
- like => {
+ regexp =>
+ qr/^ALTER TEXT SEARCH CONFIGURATION alt_ts_conf1 OWNER TO .*;/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
all_runs => 1,
catch_all =>
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)',
- regexp => qr/^ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO .*;/m,
- like => {
+ regexp =>
+ qr/^ALTER TEXT SEARCH DICTIONARY alt_ts_dict1 OWNER TO .*;/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
# catch-all for ALTER ... OWNER (except LARGE OBJECTs and PUBLICATIONs)
'ALTER ... OWNER commands (except LARGE OBJECTs and PUBLICATIONs)' => {
all_runs => 0, # catch-all
- regexp => qr/^ALTER (?!LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .*;/m,
+ regexp =>
+qr/^ALTER (?!LARGE OBJECT|PUBLICATION|SUBSCRIPTION)(.*) OWNER TO .*;/m,
like => {}, # use more-specific options above
unlike => {
column_inserts => 1,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
role => 1,
- section_post_data => 1,
+ section_post_data => 1,
test_schema_plus_blobs => 1, }, },
'COMMENT ON EXTENSION plpgsql' => {
with_oids => 1, },
unlike => {
binary_upgrade => 1,
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
role => 1,
- section_post_data => 1,
+ section_post_data => 1,
test_schema_plus_blobs => 1, }, },
'COMMENT ON TABLE dump_test.test_table' => {
create_order => 79,
create_sql => 'COMMENT ON CONVERSION dump_test.test_conversion
IS \'comment on test conversion\';',
- regexp => qr/^COMMENT ON CONVERSION test_conversion IS 'comment on test conversion';/m,
- like => {
+ regexp =>
+qr/^COMMENT ON CONVERSION test_conversion IS 'comment on test conversion';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_order => 77,
create_sql => 'COMMENT ON COLLATION test0
IS \'comment on test0 collation\';',
- regexp => qr/^COMMENT ON COLLATION test0 IS 'comment on test0 collation';/m,
+ regexp =>
+ qr/^COMMENT ON COLLATION test0 IS 'comment on test0 collation';/m,
collation => 1,
- like => {
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_order => 55,
create_sql => 'COMMENT ON PUBLICATION pub1
IS \'comment on publication\';',
- regexp => qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m,
- like => {
+ regexp =>
+ qr/^COMMENT ON PUBLICATION pub1 IS 'comment on publication';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_order => 55,
create_sql => 'COMMENT ON SUBSCRIPTION sub1
IS \'comment on subscription\';',
- regexp => qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m,
- like => {
+ regexp =>
+ qr/^COMMENT ON SUBSCRIPTION sub1 IS 'comment on subscription';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
all_runs => 1,
catch_all => 'COMMENT commands',
create_order => 84,
- create_sql => 'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1
+ create_sql =>
+ 'COMMENT ON TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1
IS \'comment on text search configuration\';',
- regexp => qr/^COMMENT ON TEXT SEARCH CONFIGURATION alt_ts_conf1 IS 'comment on text search configuration';/m,
- like => {
+ regexp =>
+qr/^COMMENT ON TEXT SEARCH CONFIGURATION alt_ts_conf1 IS 'comment on text search configuration';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
all_runs => 1,
catch_all => 'COMMENT commands',
create_order => 84,
- create_sql => 'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1
+ create_sql =>
+ 'COMMENT ON TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1
IS \'comment on text search dictionary\';',
- regexp => qr/^COMMENT ON TEXT SEARCH DICTIONARY alt_ts_dict1 IS 'comment on text search dictionary';/m,
- like => {
+ regexp =>
+qr/^COMMENT ON TEXT SEARCH DICTIONARY alt_ts_dict1 IS 'comment on text search dictionary';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_order => 84,
create_sql => 'COMMENT ON TEXT SEARCH PARSER dump_test.alt_ts_prs1
IS \'comment on text search parser\';',
- regexp => qr/^COMMENT ON TEXT SEARCH PARSER alt_ts_prs1 IS 'comment on text search parser';/m,
- like => {
+ regexp =>
+qr/^COMMENT ON TEXT SEARCH PARSER alt_ts_prs1 IS 'comment on text search parser';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
all_runs => 1,
catch_all => 'COMMENT commands',
create_order => 84,
- create_sql => 'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1
+ create_sql => 'COMMENT ON TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1
IS \'comment on text search template\';',
- regexp => qr/^COMMENT ON TEXT SEARCH TEMPLATE alt_ts_temp1 IS 'comment on text search template';/m,
- like => {
+ regexp =>
+qr/^COMMENT ON TEXT SEARCH TEMPLATE alt_ts_temp1 IS 'comment on text search template';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
column_inserts => 1,
data_only => 1,
exclude_dump_test_schema => 1,
- only_dump_test_table => 1,
+ only_dump_test_table => 1,
role => 1,
section_post_data => 1, }, },
create_order => 71,
create_sql => 'COMMENT ON TYPE dump_test.undefined
IS \'comment on undefined type\';',
- regexp => qr/^COMMENT ON TYPE undefined IS 'comment on undefined type';/m,
- like => {
+ regexp =>
+ qr/^COMMENT ON TYPE undefined IS 'comment on undefined type';/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 76,
- create_sql =>
- 'CREATE COLLATION test0 FROM "C";',
- regexp =>
- qr/^
+ create_sql => 'CREATE COLLATION test0 FROM "C";',
+ regexp => qr/^
\QCREATE COLLATION test0 (provider = libc, locale = 'C');\E/xm,
- collation => 1,
- like => {
+ collation => 1,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
'CREATE CONVERSION dump_test.test_conversion' => {
all_runs => 1,
create_order => 78,
- create_sql => 'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;',
- regexp => qr/^\QCREATE DEFAULT CONVERSION test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm,
+ create_sql =>
+'CREATE DEFAULT CONVERSION dump_test.test_conversion FOR \'LATIN1\' TO \'UTF8\' FROM iso8859_1_to_utf8;',
+ regexp =>
+qr/^\QCREATE DEFAULT CONVERSION test_conversion FOR 'LATIN1' TO 'UTF8' FROM iso8859_1_to_utf8;\E/xm,
like => {
binary_upgrade => 1,
clean => 1,
'CREATE OPERATOR FAMILY dump_test.op_family' => {
all_runs => 1,
create_order => 73,
- create_sql => 'CREATE OPERATOR FAMILY dump_test.op_family USING btree;',
+ create_sql =>
+ 'CREATE OPERATOR FAMILY dump_test.op_family USING btree;',
regexp => qr/^
\QCREATE OPERATOR FAMILY op_family USING btree;\E
/xm,
'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1' => {
all_runs => 1,
create_order => 80,
- create_sql => 'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (copy=english);',
+ create_sql =>
+'CREATE TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 (copy=english);',
regexp => qr/^
\QCREATE TEXT SEARCH CONFIGURATION alt_ts_conf1 (\E\n
\s+\QPARSER = pg_catalog."default" );\E/xm,
section_post_data => 1, }, },
'ALTER TEXT SEARCH CONFIGURATION dump_test.alt_ts_conf1 ...' => {
- all_runs => 1,
- regexp => qr/^
+ all_runs => 1,
+ regexp => qr/^
\QALTER TEXT SEARCH CONFIGURATION alt_ts_conf1\E\n
\s+\QADD MAPPING FOR asciiword WITH english_stem;\E\n
\n
'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1' => {
all_runs => 1,
create_order => 81,
- create_sql => 'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (lexize=dsimple_lexize);',
+ create_sql =>
+'CREATE TEXT SEARCH TEMPLATE dump_test.alt_ts_temp1 (lexize=dsimple_lexize);',
regexp => qr/^
\QCREATE TEXT SEARCH TEMPLATE alt_ts_temp1 (\E\n
\s+\QLEXIZE = dsimple_lexize );\E/xm,
'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1' => {
all_runs => 1,
create_order => 83,
- create_sql => 'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (template=simple);',
+ create_sql =>
+'CREATE TEXT SEARCH DICTIONARY dump_test.alt_ts_dict1 (template=simple);',
regexp => qr/^
\QCREATE TEXT SEARCH DICTIONARY alt_ts_dict1 (\E\n
\s+\QTEMPLATE = pg_catalog.simple );\E\n
'CREATE FOREIGN TABLE dump_test.foreign_table SERVER s1' => {
all_runs => 1,
create_order => 88,
- create_sql => 'CREATE FOREIGN TABLE dump_test.foreign_table (c1 int options (column_name \'col1\'))
+ create_sql =>
+'CREATE FOREIGN TABLE dump_test.foreign_table (c1 int options (column_name \'col1\'))
SERVER s1 OPTIONS (schema_name \'x1\');',
- regexp => qr/
+ regexp => qr/
\QCREATE FOREIGN TABLE foreign_table (\E\n
\s+\Qc1 integer\E\n
\Q)\E\n
\s+\Qschema_name 'x1'\E\n
\Q);\E\n
/xm,
- like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- no_owner => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ like => {
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ no_owner => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
column_inserts => 1,
data_only => 1,
'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1' => {
all_runs => 1,
create_order => 86,
- create_sql => 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;',
- regexp => qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m,
- like => {
+ create_sql =>
+ 'CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;',
+ regexp =>
+ qr/CREATE USER MAPPING FOR regress_dump_test_role SERVER s1;/m,
+ like => {
binary_upgrade => 1,
clean => 1,
clean_if_exists => 1,
create_sql => 'CREATE PUBLICATION pub2
FOR ALL TABLES
WITH (publish = \'\');',
- regexp => qr/^
+ regexp => qr/^
\QCREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish = '');\E
/xm,
like => {
create_sql => 'CREATE SUBSCRIPTION sub1
CONNECTION \'dbname=doesnotexist\' PUBLICATION pub1
WITH (connect = false);',
- regexp => qr/^
+ regexp => qr/^
\QCREATE SUBSCRIPTION sub1 CONNECTION 'dbname=doesnotexist' PUBLICATION pub1 WITH (connect = false, slot_name = 'sub1');\E
/xm,
like => {
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 11,
- create_sql => 'CREATE UNLOGGED TABLE dump_test_second_schema.test_third_table (
+ create_sql =>
+ 'CREATE UNLOGGED TABLE dump_test_second_schema.test_third_table (
col1 serial
) WITH OIDS;',
regexp => qr/^
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 90,
- create_sql => 'CREATE TABLE dump_test.measurement (
+ create_sql => 'CREATE TABLE dump_test.measurement (
city_id int not null,
logdate date not null,
peaktemp int,
\QPARTITION BY RANGE (logdate);\E\n
/xm,
like => {
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_privs => 1,
- no_owner => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_privs => 1,
+ no_owner => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
binary_upgrade => 1,
exclude_dump_test_schema => 1,
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 91,
- create_sql => 'CREATE TABLE dump_test_second_schema.measurement_y2006m2
+ create_sql =>
+ 'CREATE TABLE dump_test_second_schema.measurement_y2006m2
PARTITION OF dump_test.measurement FOR VALUES
FROM (\'2006-02-01\') TO (\'2006-03-01\');',
regexp => qr/^
all_runs => 1,
catch_all => 'CREATE ... commands',
create_order => 62,
- create_sql => 'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;',
+ create_sql =>
+ 'ALTER VIEW dump_test.test_view ALTER COLUMN col1 SET DEFAULT 1;',
regexp => qr/^
\QALTER TABLE ONLY test_view ALTER COLUMN col1 SET DEFAULT 1;\E/xm,
like => {
all_runs => 1,
catch_all => 'GRANT commands',
create_order => 72,
- create_sql => 'GRANT USAGE ON DOMAIN dump_test.us_postal_code TO regress_dump_test_role;',
+ create_sql =>
+'GRANT USAGE ON DOMAIN dump_test.us_postal_code TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON TYPE us_postal_code TO regress_dump_test_role;\E
/xm,
all_runs => 1,
catch_all => 'GRANT commands',
create_order => 87,
- create_sql => 'GRANT USAGE ON TYPE dump_test.int42 TO regress_dump_test_role;',
+ create_sql =>
+ 'GRANT USAGE ON TYPE dump_test.int42 TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON TYPE int42 TO regress_dump_test_role;\E
/xm,
all_runs => 1,
catch_all => 'GRANT commands',
create_order => 66,
- create_sql => 'GRANT USAGE ON TYPE dump_test.planets TO regress_dump_test_role;',
+ create_sql =>
+ 'GRANT USAGE ON TYPE dump_test.planets TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON TYPE planets TO regress_dump_test_role;\E
/xm,
all_runs => 1,
catch_all => 'GRANT commands',
create_order => 67,
- create_sql => 'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;',
+ create_sql =>
+'GRANT USAGE ON TYPE dump_test.textrange TO regress_dump_test_role;',
regexp => qr/^
\QGRANT ALL ON TYPE textrange TO regress_dump_test_role;\E
/xm,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
TABLE dump_test.measurement
TO regress_dump_test_role;',
regexp =>
-qr/^GRANT SELECT ON TABLE measurement TO regress_dump_test_role;/m,
+ qr/^GRANT SELECT ON TABLE measurement TO regress_dump_test_role;/m,
like => {
- binary_upgrade => 1,
- clean => 1,
- clean_if_exists => 1,
- createdb => 1,
- defaults => 1,
- exclude_test_table => 1,
- exclude_test_table_data => 1,
- no_blobs => 1,
- no_owner => 1,
- only_dump_test_schema => 1,
- pg_dumpall_dbprivs => 1,
- schema_only => 1,
- section_pre_data => 1,
- test_schema_plus_blobs => 1,
- with_oids => 1, },
+ binary_upgrade => 1,
+ clean => 1,
+ clean_if_exists => 1,
+ createdb => 1,
+ defaults => 1,
+ exclude_test_table => 1,
+ exclude_test_table_data => 1,
+ no_blobs => 1,
+ no_owner => 1,
+ only_dump_test_schema => 1,
+ pg_dumpall_dbprivs => 1,
+ schema_only => 1,
+ section_pre_data => 1,
+ test_schema_plus_blobs => 1,
+ with_oids => 1, },
unlike => {
column_inserts => 1,
data_only => 1,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
section_pre_data => 1,
with_oids => 1, },
unlike => {
- column_inserts => 1,
- data_only => 1,
+ column_inserts => 1,
+ data_only => 1,
only_dump_test_schema => 1,
only_dump_test_table => 1,
pg_dumpall_globals => 1,
test_schema_plus_blobs => 1, }, },
'GRANT USAGE ON SCHEMA public TO public' => {
- regexp => qr/^
+ regexp => qr/^
\Q--\E\n\n
\QGRANT USAGE ON SCHEMA public TO PUBLIC;\E
/xm,
like => {
- clean => 1,
- clean_if_exists => 1, },
+ clean => 1,
+ clean_if_exists => 1, },
unlike => {
binary_upgrade => 1,
createdb => 1,
regexp => qr/^
\QREFRESH MATERIALIZED VIEW matview_third;\E
/xms,
- like => { },
+ like => {},
unlike => {
binary_upgrade => 1,
clean => 1,
regexp => qr/^
\QREFRESH MATERIALIZED VIEW matview_fourth;\E
/xms,
- like => { },
+ like => {},
unlike => {
binary_upgrade => 1,
clean => 1,
# If the build does not support collations, we will skip all the
# COLLATION-related tests.
my $collation_support = 0;
my $collation_check_stderr;
-$node->psql('postgres',"CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;", on_error_stop => 0, stderr => \$collation_check_stderr);
+$node->psql(
+ 'postgres',
+ "CREATE COLLATION testing FROM \"C\"; DROP COLLATION testing;",
+ on_error_stop => 0,
+ stderr => \$collation_check_stderr);
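# The probe only needs to show whether CREATE COLLATION is accepted at all:
# any ERROR in its stderr leaves $collation_support unset, and the
# collation-flagged tests are then skipped in the loops below.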
if ($collation_check_stderr !~ /ERROR: /)
{
# Then count all the tests run against each run
foreach my $test (sort keys %tests)
{
+
# Skip any collation-related commands if there is no collation support
- if (!$collation_support && defined($tests{$test}->{collation})) {
+ if (!$collation_support && defined($tests{$test}->{collation}))
+ {
next;
}
{
if ($tests{$test}->{create_sql})
{
+
# Skip any collation-related commands if there is no collation support
- if (!$collation_support && defined($tests{$test}->{collation})) {
+ if (!$collation_support && defined($tests{$test}->{collation}))
+ {
next;
}
foreach my $test (sort keys %tests)
{
+
# Skip any collation-related commands if there is no collation support
- if (!$collation_support && defined($tests{$test}->{collation})) {
+ if (!$collation_support && defined($tests{$test}->{collation}))
+ {
next;
}
# XXX no printed message when this fails, just SIGPIPE termination
$node->command_ok(
- [ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt,
- '-U', $dbname1, $node->connstr($dbname1) ],
+ [ 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $dbname1,
+ $node->connstr($dbname1) ],
'parallel dump');
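# Note that -j (parallel dump) is only supported by the directory output
# format, hence the -Fd above.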
# recreate $dbname1 for restore test
'cluster specific table');
$node->command_ok([qw(clusterdb --echo --verbose dbname=template1)],
- 'clusterdb with connection string');
+ 'clusterdb with connection string');
'reindex with verbose output');
$node->command_ok([qw(reindexdb --echo --table=pg_am dbname=template1)],
- 'reindexdb table with connection string');
-$node->command_ok([qw(reindexdb --echo dbname=template1)],
- 'reindexdb database with connection string');
-$node->command_ok([qw(reindexdb --echo --system dbname=template1)],
- 'reindexdb system with connection string');
+ 'reindexdb table with connection string');
+$node->command_ok(
+ [qw(reindexdb --echo dbname=template1)],
+ 'reindexdb database with connection string');
+$node->command_ok(
+ [qw(reindexdb --echo --system dbname=template1)],
+ 'reindexdb system with connection string');
qr/statement: ANALYZE;/,
'vacuumdb -Z');
$node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)],
- 'vacuumdb with connection string');
+ 'vacuumdb with connection string');
# Prevent the server from interpreting everything as UTF8. We're going to
# use byte sequences
# that aren't valid UTF-8 strings, so that would fail. Use LATIN1,
# which accepts any byte and has a conversion from each byte to UTF-8.
-$ENV{LC_ALL} = 'C';
+$ENV{LC_ALL} = 'C';
$ENV{PGCLIENTENCODING} = 'LATIN1';
# Create database names covering the range of LATIN1 characters and
# run the utilities' --all options over them.
-my $dbname1 = generate_ascii_string(1, 63); # contains '='
-my $dbname2 = generate_ascii_string(67, 129); # skip 64-66 to keep length to 62
+my $dbname1 = generate_ascii_string(1, 63); # contains '='
+my $dbname2 =
+ generate_ascii_string(67, 129); # skip 64-66 to keep length to 62
my $dbname3 = generate_ascii_string(130, 192);
my $dbname4 = generate_ascii_string(193, 255);
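# Between them the four names cover bytes 0x01-0xFF, apart from 64-66 which
# are skipped above; the '=' inside $dbname1 is the interesting case, since
# '=' normally separates keywords from values in a connection string.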
my $node = get_new_node('main');
-$node->init(extra => ['--locale=C', '--encoding=LATIN1']);
+$node->init(extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node->start;
foreach my $dbname ($dbname1, $dbname2, $dbname3, $dbname4, 'CamelCase')
{
- $node->run_log(['createdb', $dbname]);
+ $node->run_log([ 'createdb', $dbname ]);
}
-$node->command_ok([qw(vacuumdb --all --echo --analyze-only)],
- 'vacuumdb --all with unusual database names');
+$node->command_ok(
+ [qw(vacuumdb --all --echo --analyze-only)],
+ 'vacuumdb --all with unusual database names');
$node->command_ok([qw(reindexdb --all --echo)],
- 'reindexdb --all with unusual database names');
-$node->command_ok([qw(clusterdb --all --echo --verbose)],
- 'clusterdb --all with unusual database names');
+ 'reindexdb --all with unusual database names');
+$node->command_ok(
+ [qw(clusterdb --all --echo --verbose)],
+ 'clusterdb --all with unusual database names');
# and character decomposition mapping
my @characters = ();
my %character_hash = ();
-open($FH, '<', "UnicodeData.txt") or die "Could not open UnicodeData.txt: $!.";
+open($FH, '<', "UnicodeData.txt")
+ or die "Could not open UnicodeData.txt: $!.";
while (my $line = <$FH>)
{
+
# Split the line wanted and get the fields needed:
# - Unicode code value
# - Canonical Combining Class
if ($decomp_size == 2)
{
+
# Should this be used for recomposition?
if ($compat)
{
}
elsif ($decomp_size == 1 && length($first_decomp) <= 4)
{
+
# The decomposition consists of a single codepoint, and it fits
# in a uint16, so we can store it "inline" in the main table.
$flags .= " | DECOMP_INLINE";
print $OUTPUT "," unless ($code eq $last_code);
if ($comment ne "")
{
+
# If the line is wide already, indent the comment with one tab,
# otherwise with two. This is to make the output match the way
# pgindent would mangle it. (This is quite hacky. To do this
}
# restore STDOUT/ERR so we can print the outcome to the user
-open(STDERR, ">&", $olderr_fh) or die; # can't complain as STDERR is still duped
+open(STDERR, ">&", $olderr_fh)
+ or die; # can't complain as STDERR is still duped
open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!";
# just in case
{
- package PostgreSQL::InServer; ## no critic (RequireFilenameMatchesPackage);
+ package PostgreSQL::InServer
+ ; ## no critic (RequireFilenameMatchesPackage);
use strict;
use warnings;
# src/pl/plperl/plc_trusted.pl
-package PostgreSQL::InServer::safe; ## no critic (RequireFilenameMatchesPackage);
+package PostgreSQL::InServer::safe
+ ; ## no critic (RequireFilenameMatchesPackage);
# Load widely useful pragmas into plperl to make them available.
#
use PostgresNode;
use TestLib;
use Test::More;
-if ($windows_os)
+if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
# Delete pg_hba.conf from the given node, add a new entry to it
# and then execute a reload to refresh it.
sub reset_pg_hba
{
- my $node = shift;
+ my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
# Test access for a single role, useful to wrap all tests into one.
sub test_role
{
- my $node = shift;
- my $role = shift;
- my $method = shift;
- my $expected_res = shift;
+ my $node = shift;
+ my $role = shift;
+ my $method = shift;
+ my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
- my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
+ my $res =
+ $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
- "authentication $status_string for method $method, role $role");
+ "authentication $status_string for method $method, role $role");
}
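# Typical use, as below: test_role($node, 'scram_role', 'trust', 0) expects
# a successful psql connection (exit code 0) as scram_role after pg_hba.conf
# has been reset to the trust method.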
# Initialize master node
# Create 3 roles with different password methods for each one. The same
# password is used for all of them.
-$node->safe_psql('postgres', "SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';");
-$node->safe_psql('postgres', "SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
+$node->safe_psql('postgres',
+"SET password_encryption='scram-sha-256'; CREATE ROLE scram_role LOGIN PASSWORD 'pass';"
+);
+$node->safe_psql('postgres',
+"SET password_encryption='md5'; CREATE ROLE md5_role LOGIN PASSWORD 'pass';");
$ENV{"PGPASSWORD"} = 'pass';
# For "trust" method, all users should be able to connect.
reset_pg_hba($node, 'trust');
test_role($node, 'scram_role', 'trust', 0);
-test_role($node, 'md5_role', 'trust', 0);
+test_role($node, 'md5_role', 'trust', 0);
# For plain "password" method, all users should also be able to connect.
reset_pg_hba($node, 'password');
test_role($node, 'scram_role', 'password', 0);
-test_role($node, 'md5_role', 'password', 0);
+test_role($node, 'md5_role', 'password', 0);
# For "scram-sha-256" method, user "scram_role" should be able to connect.
reset_pg_hba($node, 'scram-sha-256');
test_role($node, 'scram_role', 'scram-sha-256', 0);
-test_role($node, 'md5_role', 'scram-sha-256', 2);
+test_role($node, 'md5_role', 'scram-sha-256', 2);
# For "md5" method, all users should be able to connect (SCRAM
# authentication will be performed for the user with a scram verifier.)
reset_pg_hba($node, 'md5');
test_role($node, 'scram_role', 'md5', 0);
-test_role($node, 'md5_role', 'md5', 0);
+test_role($node, 'md5_role', 'md5', 0);
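# The expected results follow PostgresNode::psql's return convention: 0 for
# success, 2 for a connection error, which is what a rejected authentication
# attempt produces.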
use PostgresNode;
use TestLib;
use Test::More;
-if ($windows_os)
+if ($windows_os)
{
plan skip_all => "authentication tests cannot run on Windows";
}
# Delete pg_hba.conf from the given node, add a new entry to it
# and then execute a reload to refresh it.
sub reset_pg_hba
{
- my $node = shift;
+ my $node = shift;
my $hba_method = shift;
unlink($node->data_dir . '/pg_hba.conf');
# Test access for a single role, useful to wrap all tests into one.
sub test_login
{
- my $node = shift;
- my $role = shift;
- my $password = shift;
- my $expected_res = shift;
+ my $node = shift;
+ my $role = shift;
+ my $password = shift;
+ my $expected_res = shift;
my $status_string = 'failed';
$status_string = 'success' if ($expected_res eq 0);
$ENV{"PGPASSWORD"} = $password;
- my $res = $node->psql('postgres', 'SELECT 1', extra_params => ['-U', $role]);
+ my $res =
+ $node->psql('postgres', 'SELECT 1', extra_params => [ '-U', $role ]);
is($res, $expected_res,
- "authentication $status_string for role $role with password $password");
+ "authentication $status_string for role $role with password $password"
+ );
}
# Initialize master node. Force UTF-8 encoding, so that we can use non-ASCII
# characters in the passwords below.
my $node = get_new_node('master');
-$node->init(extra => ['--locale=C', '--encoding=UTF8']);
+$node->init(extra => [ '--locale=C', '--encoding=UTF8' ]);
$node->start;
# These tests are based on the example strings from RFC4013.txt,
# 7 <U+0627><U+0031> Error - bidirectional check
# Create test roles.
-$node->safe_psql('postgres',
-"SET password_encryption='scram-sha-256';
+$node->safe_psql(
+ 'postgres',
+ "SET password_encryption='scram-sha-256';
SET client_encoding='utf8';
CREATE ROLE saslpreptest1_role LOGIN PASSWORD 'IX';
CREATE ROLE saslpreptest4a_role LOGIN PASSWORD 'a';
reset_pg_hba($node, 'scram-sha-256');
# Check that #1 and #5 are treated the same as just 'IX'
-test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
+test_login($node, 'saslpreptest1_role', "I\xc2\xadX", 0);
test_login($node, 'saslpreptest1_role', "\xe2\x85\xa8", 0);
# but different from lower case 'ix'
test_login($node, 'saslpreptest1_role', "ix", 2);
# Check #4
-test_login($node, 'saslpreptest4a_role', "a", 0);
+test_login($node, 'saslpreptest4a_role', "a", 0);
test_login($node, 'saslpreptest4a_role', "\xc2\xaa", 0);
-test_login($node, 'saslpreptest4b_role', "a", 0);
+test_login($node, 'saslpreptest4b_role', "a", 0);
test_login($node, 'saslpreptest4b_role', "\xc2\xaa", 0);
# Check #6 and #7 - In PostgreSQL, contrary to the spec, if the password
# contains prohibited characters, we use it as is, without normalization.
test_login($node, 'saslpreptest6_role', "foo\x07bar", 0);
-test_login($node, 'saslpreptest6_role', "foobar", 2);
+test_login($node, 'saslpreptest6_role', "foobar", 2);
test_login($node, 'saslpreptest7_role', "foo\xd8\xa71bar", 0);
test_login($node, 'saslpreptest7_role', "foo1\xd8\xa7bar", 2);
-test_login($node, 'saslpreptest7_role', "foobar", 2);
+test_login($node, 'saslpreptest7_role', "foobar", 2);
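# The raw byte strings are UTF-8: \xc2\xad is U+00AD SOFT HYPHEN (mapped to
# nothing by SASLprep), \xe2\x85\xa8 is U+2168 ROMAN NUMERAL NINE (NFKC-
# normalized to 'IX'), \xc2\xaa is U+00AA (normalized to 'a'), and \xd8\xa7
# is U+0627 ARABIC LETTER ALEF, which triggers the bidirectional check.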
$master->append_conf('postgresql.conf', 'track_commit_timestamp = off');
$master->restart;
$master->safe_psql('postgres', 'checkpoint');
-$master_lsn =
- $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
+$master_lsn = $master->safe_psql('postgres', 'select pg_current_wal_lsn()');
$standby->poll_query_until('postgres',
qq{SELECT '$master_lsn'::pg_lsn <= pg_last_wal_replay_lsn()})
or die "slave never caught up";
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('1');]);
-is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
+is($ret, 0, 'getting ts of BootstrapTransactionId succeeds');
is($stdout, '', 'timestamp of BootstrapTransactionId is null');
($ret, $stdout, $stderr) =
$node_master->psql('postgres', qq[SELECT pg_xact_commit_timestamp('2');]);
-is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
+is($ret, 0, 'getting ts of FrozenTransactionId succeeds');
is($stdout, '', 'timestamp of FrozenTransactionId is null');
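# XIDs 1 (BootstrapTransactionId) and 2 (FrozenTransactionId) are permanent
# special transaction IDs, so no commit timestamp can ever be recorded for
# them; normal XIDs start at FirstNormalTransactionId, i.e. 3.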
# Since FirstNormalTransactionId will've occurred during initdb, long before
# we enabled commit timestamps, its timestamp will be null too.
my %pgdump_runs = (
binary_upgrade => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/binary_upgrade.sql",
- '--schema-only',
- '--binary-upgrade',
- '--dbname=postgres', ], },
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/binary_upgrade.sql", '--schema-only',
+ '--binary-upgrade', '--dbname=postgres', ], },
clean => {
dump_cmd => [
'pg_dump', "--file=$tempdir/clean.sql",
'postgres', ], },
column_inserts => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/column_inserts.sql",
- '-a',
- '--column-inserts',
- 'postgres', ], },
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/column_inserts.sql", '-a',
+ '--column-inserts', 'postgres', ], },
createdb => {
dump_cmd => [
'pg_dump',
'--no-sync',
"--file=$tempdir/createdb.sql",
'-C',
- '-R', # no-op, just for testing
+ '-R', # no-op, just for testing
'postgres', ], },
data_only => {
dump_cmd => [
'--no-sync',
"--file=$tempdir/data_only.sql",
'-a',
- '-v', # no-op, just make sure it works
+ '-v', # no-op, just make sure it works
'postgres', ], },
defaults => {
dump_cmd => [ 'pg_dump', '-f', "$tempdir/defaults.sql", 'postgres', ],
"$tempdir/defaults_tar_format.tar", ], },
pg_dumpall_globals => {
dump_cmd => [
- 'pg_dumpall',
- '--no-sync',
- "--file=$tempdir/pg_dumpall_globals.sql",
- '-g', ],
- },
+ 'pg_dumpall', '--no-sync',
+ "--file=$tempdir/pg_dumpall_globals.sql", '-g', ], },
no_privs => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_privs.sql",
- '-x',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_privs.sql", '-x',
'postgres', ], },
no_owner => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/no_owner.sql",
- '-O',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/no_owner.sql", '-O',
'postgres', ], },
schema_only => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/schema_only.sql",
- '-s',
- 'postgres', ],
- },
+ 'pg_dump', '--no-sync', "--file=$tempdir/schema_only.sql",
+ '-s', 'postgres', ], },
section_pre_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_pre_data.sql",
- '--section=pre-data',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_pre_data.sql", '--section=pre-data',
'postgres', ], },
section_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_data.sql",
- '--section=data',
+ 'pg_dump', '--no-sync',
+ "--file=$tempdir/section_data.sql", '--section=data',
'postgres', ], },
section_post_data => {
dump_cmd => [
- 'pg_dump',
- '--no-sync',
- "--file=$tempdir/section_post_data.sql",
+ 'pg_dump', '--no-sync', "--file=$tempdir/section_post_data.sql",
'--section=post-data', 'postgres', ], },);
###############################################################
pg_dumpall_globals => 1,
section_post_data => 1, }, },
- 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role'
- => {
- create_order => 4,
+ 'GRANT SELECT(col2) ON regress_pg_dump_table TO regress_dump_test_role' =>
+ { create_order => 4,
create_sql => 'GRANT SELECT(col2) ON regress_pg_dump_table
TO regress_dump_test_role;',
regexp => qr/^
my $name = $self->name;
print "### Restarting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'restart');
+ 'restart');
$self->_update_pid(1);
}
my $name = $self->name;
print "### Promoting node \"$name\"\n";
TestLib::system_or_bail('pg_ctl', '-D', $pgdata, '-l', $logfile,
- 'promote');
+ 'promote');
}
# Internal routine to enable streaming replication on a standby node.
$self->{_pid} = undef;
print "# No postmaster PID for node \"$name\"\n";
+
# Complain if we expected to find a pidfile.
BAIL_OUT("postmaster.pid unexpectedly not present") if $is_running;
}
my $exc_save = $@;
if ($exc_save)
{
+
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
- if (blessed($exc_save) || $exc_save !~ /^\Q$timeout_exception\E/);
+ if (blessed($exc_save)
+ || $exc_save !~ /^\Q$timeout_exception\E/);
$ret = undef;
if $ret == 1;
die "connection error: '$$stderr'\nwhile running '@psql_params'"
if $ret == 2;
- die "error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
+ die
+"error running SQL: '$$stderr'\nwhile running '@psql_params' with sql '$sql'"
if $ret == 3;
die "psql returns $ret: '$$stderr'\nwhile running '@psql_params'";
}
sub lsn
{
my ($self, $mode) = @_;
- my %modes = ('insert' => 'pg_current_wal_insert_lsn()',
- 'flush' => 'pg_current_wal_flush_lsn()',
- 'write' => 'pg_current_wal_lsn()',
- 'receive' => 'pg_last_wal_receive_lsn()',
- 'replay' => 'pg_last_wal_replay_lsn()');
+ my %modes = (
+ 'insert' => 'pg_current_wal_insert_lsn()',
+ 'flush' => 'pg_current_wal_flush_lsn()',
+ 'write' => 'pg_current_wal_lsn()',
+ 'receive' => 'pg_last_wal_receive_lsn()',
+ 'replay' => 'pg_last_wal_replay_lsn()');
$mode = '<undef>' if !defined($mode);
- die "unknown mode for 'lsn': '$mode', valid modes are " . join(', ', keys %modes)
- if !defined($modes{$mode});
+ die "unknown mode for 'lsn': '$mode', valid modes are "
+ . join(', ', keys %modes)
+ if !defined($modes{$mode});
my $result = $self->safe_psql('postgres', "SELECT $modes{$mode}");
chomp($result);
{
my ($self, $standby_name, $mode, $target_lsn) = @_;
$mode = defined($mode) ? $mode : 'replay';
- my %valid_modes = ( 'sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1 );
- die "unknown mode $mode for 'wait_for_catchup', valid modes are " . join(', ', keys(%valid_modes)) unless exists($valid_modes{$mode});
+ my %valid_modes =
+ ('sent' => 1, 'write' => 1, 'flush' => 1, 'replay' => 1);
+ die "unknown mode $mode for 'wait_for_catchup', valid modes are "
+ . join(', ', keys(%valid_modes))
+ unless exists($valid_modes{$mode});
+
# Allow passing of a PostgresNode instance as shorthand
- if ( blessed( $standby_name ) && $standby_name->isa("PostgresNode") )
+ if (blessed($standby_name) && $standby_name->isa("PostgresNode"))
{
$standby_name = $standby_name->name;
}
die 'target_lsn must be specified' unless defined($target_lsn);
- print "Waiting for replication conn " . $standby_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
- my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
+ print "Waiting for replication conn "
+ . $standby_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_stat_replication WHERE application_name = '$standby_name';];
$self->poll_query_until('postgres', $query)
- or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
+ or die "timed out waiting for catchup, current location is "
+ . ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
die "valid modes are restart, confirmed_flush";
}
die 'target lsn must be specified' unless defined($target_lsn);
- print "Waiting for replication slot " . $slot_name . "'s " . $mode . "_lsn to pass " . $target_lsn . " on " . $self->name . "\n";
- my $query = qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
+ print "Waiting for replication slot "
+ . $slot_name . "'s "
+ . $mode
+ . "_lsn to pass "
+ . $target_lsn . " on "
+ . $self->name . "\n";
+ my $query =
+qq[SELECT '$target_lsn' <= ${mode}_lsn FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name';];
$self->poll_query_until('postgres', $query)
- or die "timed out waiting for catchup, current location is " . ($self->safe_psql('postgres', $query) || '(unknown)');
+ or die "timed out waiting for catchup, current location is "
+ . ($self->safe_psql('postgres', $query) || '(unknown)');
print "done\n";
}
sub query_hash
{
my ($self, $dbname, $query, @columns) = @_;
- die 'calls in array context for multi-row results not supported yet' if (wantarray);
+ die 'calls in array context for multi-row results not supported yet'
+ if (wantarray);
+
# Replace __COLUMNS__ if found
- substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) = join(', ', @columns)
- if index($query, '__COLUMNS__') >= 0;
+ substr($query, index($query, '__COLUMNS__'), length('__COLUMNS__')) =
+ join(', ', @columns)
+ if index($query, '__COLUMNS__') >= 0;
my $result = $self->safe_psql($dbname, $query);
+
# hash slice, see http://stackoverflow.com/a/16755894/398670 .
#
# Fills the hash with empty strings produced by x-operator element
# duplication if result is an empty row
#
my %val;
- @val{@columns} = $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
+ @val{@columns} =
+ $result ne '' ? split(qr/\|/, $result) : ('',) x scalar(@columns);
return \%val;
}
sub slot
{
my ($self, $slot_name) = @_;
- my @columns = ('plugin', 'slot_type', 'datoid', 'database', 'active', 'active_pid', 'xmin', 'catalog_xmin', 'restart_lsn');
- return $self->query_hash('postgres', "SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'", @columns);
+ my @columns = (
+ 'plugin', 'slot_type', 'datoid', 'database',
+ 'active', 'active_pid', 'xmin', 'catalog_xmin',
+ 'restart_lsn');
+ return $self->query_hash(
+ 'postgres',
+"SELECT __COLUMNS__ FROM pg_catalog.pg_replication_slots WHERE slot_name = '$slot_name'",
+ @columns);
}
=pod
sub pg_recvlogical_upto
{
- my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) = @_;
+ my ($self, $dbname, $slot_name, $endpos, $timeout_secs, %plugin_options) =
+ @_;
my ($stdout, $stderr);
my $timeout_exception = 'pg_recvlogical timed out';
die 'slot name must be specified' unless defined($slot_name);
- die 'endpos must be specified' unless defined($endpos);
+ die 'endpos must be specified' unless defined($endpos);
- my @cmd = ('pg_recvlogical', '-S', $slot_name, '--dbname', $self->connstr($dbname));
+ my @cmd = (
+ 'pg_recvlogical', '-S', $slot_name, '--dbname',
+ $self->connstr($dbname));
push @cmd, '--endpos', $endpos;
push @cmd, '-f', '-', '--no-loop', '--start';
while (my ($k, $v) = each %plugin_options)
{
- die "= is not permitted to appear in replication option name" if ($k =~ qr/=/);
+ die "= is not permitted to appear in replication option name"
+ if ($k =~ qr/=/);
push @cmd, "-o", "$k=$v";
}
my $timeout;
- $timeout = IPC::Run::timeout($timeout_secs, exception => $timeout_exception ) if $timeout_secs;
+ $timeout =
+ IPC::Run::timeout($timeout_secs, exception => $timeout_exception)
+ if $timeout_secs;
my $ret = 0;
- do {
+ do
+ {
local $@;
eval {
IPC::Run::run(\@cmd, ">", \$stdout, "2>", \$stderr, $timeout);
my $exc_save = $@;
if ($exc_save)
{
+
# IPC::Run::run threw an exception. re-throw unless it's a
# timeout, which we'll handle by testing is_expired
die $exc_save
die "Got timeout exception '$exc_save' but timer not expired?!"
unless $timeout->is_expired;
- die "$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
- unless wantarray;
+ die
+"$exc_save waiting for endpos $endpos with stdout '$stdout', stderr '$stderr'"
+ unless wantarray;
}
};
}
else
{
- die "pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'" if $ret;
+ die
+"pg_recvlogical exited with code '$ret', stdout '$stdout' and stderr '$stderr'"
+ if $ret;
return $stdout;
}
}
use File::Temp ();
use IPC::Run;
use SimpleTee;
+
# specify a recent enough version of Test::More to support the note() function
use Test::More 0.82;
# Hijack STDOUT and STDERR to the log file
open(my $orig_stdout, '>&', \*STDOUT);
open(my $orig_stderr, '>&', \*STDERR);
- open(STDOUT, '>&', $testlog);
- open(STDERR, '>&', $testlog);
+ open(STDOUT, '>&', $testlog);
+ open(STDERR, '>&', $testlog);
# The test output (ok ...) needs to be printed to the original STDOUT so
# that the 'prove' program can parse it, and display it to the user in
# real time. But also copy it to the log file, to provide more context
# in the log.
"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a");
# Wait for standbys to catch up
-$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
-$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
+$node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('insert'));
+$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('replay'));
my $result =
$node_standby_1->safe_psql('postgres', "SELECT count(*) FROM tab_int");
# target_session_attrs with multiple nodes.
sub test_target_session_attrs
{
- my $node1 = shift;
- my $node2 = shift;
+ my $node1 = shift;
+ my $node2 = shift;
my $target_node = shift;
- my $mode = shift;
- my $status = shift;
+ my $mode = shift;
+ my $status = shift;
my $node1_host = $node1->host;
my $node1_port = $node1->port;
# The client used for the connection does not matter, only the backend
# point does.
my ($ret, $stdout, $stderr) =
- $node1->psql('postgres', 'SHOW port;', extra_params => ['-d', $connstr]);
- is($status == $ret && $stdout eq $target_node->port, 1,
- "connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed");
+ $node1->psql('postgres', 'SHOW port;',
+ extra_params => [ '-d', $connstr ]);
+ is( $status == $ret && $stdout eq $target_node->port,
+ 1,
+"connect to node $target_name if mode \"$mode\" and $node1_name,$node2_name listed"
+ );
}
# Connect to master in "read-write" mode with master,standby1 list.
test_target_session_attrs($node_master, $node_standby_1, $node_master,
- "read-write", 0);
+ "read-write", 0);
+
# Connect to master in "read-write" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_master,
- "read-write", 0);
+ "read-write", 0);
+
# Connect to master in "any" mode with master,standby1 list.
-test_target_session_attrs($node_master, $node_standby_1, $node_master,
- "any", 0);
+test_target_session_attrs($node_master, $node_standby_1, $node_master, "any",
+ 0);
+
# Connect to standby1 in "any" mode with standby1,master list.
test_target_session_attrs($node_standby_1, $node_master, $node_standby_1,
- "any", 0);
+ "any", 0);
note "switching to physical replication slot";
+
# Switch to using a physical replication slot. We can do this without a new
# backup since physical slots can go backwards if needed. Do so on both
# standbys. Since we're going to be testing things that affect the slot state,
# also increase the standby feedback interval to ensure timely updates.
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_master->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_master->restart;
-is($node_master->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_1');]), 0, 'physical slot created on master');
-$node_standby_1->append_conf('recovery.conf', "primary_slot_name = $slotname_1");
-$node_standby_1->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
+is( $node_master->psql(
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('$slotname_1');]),
+ 0,
+ 'physical slot created on master');
+$node_standby_1->append_conf('recovery.conf',
+ "primary_slot_name = $slotname_1");
+$node_standby_1->append_conf('postgresql.conf',
+ "wal_receiver_status_interval = 1");
$node_standby_1->append_conf('postgresql.conf', "max_replication_slots = 4");
$node_standby_1->restart;
-is($node_standby_1->psql('postgres', qq[SELECT pg_create_physical_replication_slot('$slotname_2');]), 0, 'physical slot created on intermediate replica');
-$node_standby_2->append_conf('recovery.conf', "primary_slot_name = $slotname_2");
-$node_standby_2->append_conf('postgresql.conf', "wal_receiver_status_interval = 1");
+is( $node_standby_1->psql(
+ 'postgres',
+ qq[SELECT pg_create_physical_replication_slot('$slotname_2');]),
+ 0,
+ 'physical slot created on intermediate replica');
+$node_standby_2->append_conf('recovery.conf',
+ "primary_slot_name = $slotname_2");
+$node_standby_2->append_conf('postgresql.conf',
+ "wal_receiver_status_interval = 1");
$node_standby_2->restart;
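
Once primary_slot_name points at a slot, the upstream node retains WAL for that standby. A hedged sketch of inspecting that retention, reusing the test's own node handle and slot name:

    my $restart_lsn = $node_master->safe_psql('postgres',
        "SELECT restart_lsn FROM pg_replication_slots"
          . " WHERE slot_name = '$slotname_1';");
    note "slot $slotname_1 is holding WAL back to $restart_lsn";
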
sub get_slot_xmins
# There's no hot standby feedback and there are no logical slots on either peer
# so xmin and catalog_xmin should be null on both slots.
my ($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
-is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
+is($xmin, '', 'non-cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'non-cascaded slot catalog_xmin null with no hs_feedback');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
+is($xmin, '', 'cascaded slot xmin null with no hs_feedback');
is($catalog_xmin, '', 'cascaded slot catalog_xmin null with no hs_feedback');
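
The body of get_slot_xmins is elided in this hunk; presumably it is a thin wrapper over the slot() accessor used elsewhere in this patch, along these lines:

    sub get_slot_xmins
    {
        my ($node, $slotname) = @_;
        my $slotinfo = $node->slot($slotname);
        return ($slotinfo->{'xmin'}, $slotinfo->{'catalog_xmin'});
    }
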
# Replication still works?
sub replay_check
{
- my $newval = $node_master->safe_psql('postgres', 'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val');
- $node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('insert'));
- $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('replay'));
- $node_standby_1->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
- or die "standby_1 didn't replay master value $newval";
- $node_standby_2->safe_psql('postgres', qq[SELECT 1 FROM replayed WHERE val = $newval])
- or die "standby_2 didn't replay standby_1 value $newval";
+ my $newval = $node_master->safe_psql('postgres',
+'INSERT INTO replayed(val) SELECT coalesce(max(val),0) + 1 AS newval FROM replayed RETURNING val'
+ );
+ $node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('insert'));
+ $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('replay'));
+ $node_standby_1->safe_psql('postgres',
+ qq[SELECT 1 FROM replayed WHERE val = $newval])
+ or die "standby_1 didn't replay master value $newval";
+ $node_standby_2->safe_psql('postgres',
+ qq[SELECT 1 FROM replayed WHERE val = $newval])
+ or die "standby_2 didn't replay standby_1 value $newval";
}
replay_check();
note "enabling hot_standby_feedback";
+
# Enable hs_feedback. The slot should gain an xmin. We set the status interval
# so we'll see the results promptly.
-$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_1->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_1->reload;
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
replay_check();
sleep(2);
is($catalog_xmin, '', 'cascaded slot catalog_xmin still null with hs_feedback');
note "doing some work to advance xmin";
-for my $i (10000..11000) {
+for my $i (10000 .. 11000)
+{
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES ($i);]);
}
$node_master->safe_psql('postgres', 'VACUUM;');
my ($xmin2, $catalog_xmin2) = get_slot_xmins($node_master, $slotname_1);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'non-cascaded slot xmin with hs feedback has changed');
-is($catalog_xmin2, '', 'non-cascaded slot xmin still null with hs_feedback unchanged');
+is($catalog_xmin2, '',
+ 'non-cascaded slot catalog_xmin still null with hs_feedback unchanged');
($xmin2, $catalog_xmin2) = get_slot_xmins($node_standby_1, $slotname_2);
note "new xmin $xmin2, old xmin $xmin";
isnt($xmin2, $xmin, 'cascaded slot xmin with hs feedback has changed');
-is($catalog_xmin2, '', 'cascaded slot xmin still null with hs_feedback unchanged');
+is($catalog_xmin2, '',
+ 'cascaded slot catalog_xmin still null with hs_feedback unchanged');
note "disabling hot_standby_feedback";
+
# Disable hs_feedback. Xmin should be cleared.
-$node_standby_1->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_1->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_1->reload;
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->reload;
replay_check();
sleep(2);
($xmin, $catalog_xmin) = get_slot_xmins($node_master, $slotname_1);
is($xmin, '', 'non-cascaded slot xmin null with hs feedback reset');
-is($catalog_xmin, '', 'non-cascaded slot xmin still null with hs_feedback reset');
+is($catalog_xmin, '',
+ 'non-cascaded slot catalog_xmin still null with hs_feedback reset');
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
+is($xmin, '', 'cascaded slot xmin null with hs feedback reset');
is($catalog_xmin, '', 'cascaded slot catalog_xmin still null with hs_feedback reset');
note "re-enabling hot_standby_feedback and disabling while stopped";
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = on;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = on;');
$node_standby_2->reload;
$node_master->safe_psql('postgres', qq[INSERT INTO tab_int VALUES (11000);]);
replay_check();
-$node_standby_2->safe_psql('postgres', 'ALTER SYSTEM SET hot_standby_feedback = off;');
+$node_standby_2->safe_psql('postgres',
+ 'ALTER SYSTEM SET hot_standby_feedback = off;');
$node_standby_2->stop;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
$node_standby_2->start;
($xmin, $catalog_xmin) = get_slot_xmins($node_standby_1, $slotname_2);
-is($xmin, '', 'cascaded slot xmin reset after startup with hs feedback reset');
+is($xmin, '',
+ 'cascaded slot xmin reset after startup with hs feedback reset');
foreach my $param_item (@$recovery_params)
{
- $node_standby->append_conf(
- 'recovery.conf', qq($param_item));
+ $node_standby->append_conf('recovery.conf', qq($param_item));
}
$node_standby->start;
# More data, with recovery target timestamp
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(2001,3000))");
-$ret = $node_master->safe_psql('postgres',
- "SELECT pg_current_wal_lsn(), now();");
+$ret =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn(), now();");
my ($lsn3, $recovery_time) = split /\|/, $ret;
# Even more data, this time with a recovery target name
# And now for a recovery target LSN
$node_master->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(4001,5000))");
-my $recovery_lsn = $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
+my $recovery_lsn =
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn()");
my $lsn5 =
$node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a");
# Wait until standby has replayed enough data on standby 1
-$node_master->wait_for_catchup($node_standby_1, 'replay', $node_master->lsn('write'));
+$node_master->wait_for_catchup($node_standby_1, 'replay',
+ $node_master->lsn('write'));
# Stop and remove master, and promote standby 1, switching it to a new timeline
$node_master->teardown_node;
# to ensure that the timeline switch has been done.
$node_standby_1->safe_psql('postgres',
"INSERT INTO tab_int VALUES (generate_series(1001,2000))");
-$node_standby_1->wait_for_catchup($node_standby_2, 'replay', $node_standby_1->lsn('write'));
+$node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+ $node_standby_1->lsn('write'));
my $result =
$node_standby_2->safe_psql('postgres', "SELECT count(*) FROM tab_int");
# Done waiting?
my $replay_status = $node_standby->safe_psql('postgres',
- "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0"
- );
+ "SELECT (pg_last_wal_replay_lsn() - '$until_lsn'::pg_lsn) >= 0");
last if $replay_status eq 't';
# No, sleep some more.
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
$node_master->append_conf(
- 'postgresql.conf', qq(
+ 'postgresql.conf', qq(
wal_level = logical
));
$node_master->start;
my $backup_name = 'master_backup';
-$node_master->safe_psql('postgres', qq[CREATE TABLE decoding_test(x integer, y text);]);
+$node_master->safe_psql('postgres',
+ qq[CREATE TABLE decoding_test(x integer, y text);]);
-$node_master->safe_psql('postgres', qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
+$node_master->safe_psql('postgres',
+qq[SELECT pg_create_logical_replication_slot('test_slot', 'test_decoding');]);
-$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]);
+$node_master->safe_psql('postgres',
+qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,10) s;]
+);
# Basic decoding works
-my($result) = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
-is(scalar(my @foobar = split /^/m, $result), 12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
+my ($result) = $node_master->safe_psql('postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
+is(scalar(my @foobar = split /^/m, $result),
+ 12, 'Decoding produced 12 rows inc BEGIN/COMMIT');
# If we immediately crash the server we might lose the progress we just made
# and replay the same changes again. But a clean shutdown should never repeat
# the same changes when we use the SQL decoding interface.
$node_master->restart('fast');
# There are no new writes, so the result should be empty.
-$result = $node_master->safe_psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
+$result = $node_master->safe_psql('postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]);
chomp($result);
is($result, '', 'Decoding after fast restart repeats no rows');
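
The distinction being tested: pg_logical_slot_get_changes() consumes changes and advances the slot's confirmed position, while the peek variant leaves the slot untouched. A sketch of peeking without consuming, using the same slot:

    my $peeked = $node_master->safe_psql('postgres',
        qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL);]);
    # Running the same query again returns the same rows; only
    # pg_logical_slot_get_changes() moves the slot forward.
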
# Insert some rows and verify that we get the same results from pg_recvlogical
# and the SQL interface.
-$node_master->safe_psql('postgres', qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]);
+$node_master->safe_psql('postgres',
+qq[INSERT INTO decoding_test(x,y) SELECT s, s::text FROM generate_series(1,4) s;]
+);
my $expected = q{BEGIN
table public.decoding_test: INSERT: x[integer]:1 y[text]:'1'
table public.decoding_test: INSERT: x[integer]:4 y[text]:'4'
COMMIT};
-my $stdout_sql = $node_master->safe_psql('postgres', qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]);
+my $stdout_sql = $node_master->safe_psql('postgres',
+qq[SELECT data FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');]
+);
is($stdout_sql, $expected, 'got expected output from SQL decoding session');
-my $endpos = $node_master->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
+my $endpos = $node_master->safe_psql('postgres',
+"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+);
print "waiting to replay $endpos\n";
-my $stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+my $stdout_recv = $node_master->pg_recvlogical_upto(
+ 'postgres', 'test_slot', $endpos, 10,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
chomp($stdout_recv);
-is($stdout_recv, $expected, 'got same expected output from pg_recvlogical decoding session');
+is($stdout_recv, $expected,
+ 'got same expected output from pg_recvlogical decoding session');
-$stdout_recv = $node_master->pg_recvlogical_upto('postgres', 'test_slot', $endpos, 10, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+$stdout_recv = $node_master->pg_recvlogical_upto(
+ 'postgres', 'test_slot', $endpos, 10,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
chomp($stdout_recv);
-is($stdout_recv, '', 'pg_recvlogical acknowledged changes, nothing pending on slot');
+is($stdout_recv, '',
+ 'pg_recvlogical acknowledged changes, nothing pending on slot');
$node_master->safe_psql('postgres', 'CREATE DATABASE otherdb');
-is($node_master->psql('otherdb', "SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"), 3,
+is( $node_master->psql(
+ 'otherdb',
+"SELECT lsn FROM pg_logical_slot_peek_changes('test_slot', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+ ),
+ 3,
'replaying logical slot from another database fails');
-$node_master->safe_psql('otherdb', qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]);
+$node_master->safe_psql('otherdb',
+qq[SELECT pg_create_logical_replication_slot('otherdb_slot', 'test_decoding');]
+);
# make sure you can't drop a slot while active
SKIP:
{
- # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
+
+ # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
skip "Test fails on Windows perl", 2 if $Config{osname} eq 'MSWin32';
- my $pg_recvlogical = IPC::Run::start(['pg_recvlogical', '-d', $node_master->connstr('otherdb'), '-S', 'otherdb_slot', '-f', '-', '--start']);
- $node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)");
- is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 3,
- 'dropping a DB with inactive logical slots fails');
+ my $pg_recvlogical = IPC::Run::start(
+ [ 'pg_recvlogical', '-d', $node_master->connstr('otherdb'),
+ '-S', 'otherdb_slot', '-f', '-', '--start' ]);
+ $node_master->poll_query_until('otherdb',
+"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NOT NULL)"
+ );
+ is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
+ 3, 'dropping a DB with inactive logical slots fails');
$pg_recvlogical->kill_kill;
- is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
- 'logical slot still exists');
+ is($node_master->slot('otherdb_slot')->{'slot_name'},
+ undef, 'logical slot still exists');
}
-$node_master->poll_query_until('otherdb', "SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)");
-is($node_master->psql('postgres', 'DROP DATABASE otherdb'), 0,
- 'dropping a DB with inactive logical slots succeeds');
-is($node_master->slot('otherdb_slot')->{'slot_name'}, undef,
- 'logical slot was actually dropped with DB');
+$node_master->poll_query_until('otherdb',
+"SELECT EXISTS (SELECT 1 FROM pg_replication_slots WHERE slot_name = 'otherdb_slot' AND active_pid IS NULL)"
+);
+is($node_master->psql('postgres', 'DROP DATABASE otherdb'),
+ 0, 'dropping a DB with inactive logical slots succeeds');
+is($node_master->slot('otherdb_slot')->{'slot_name'},
+ undef, 'logical slot was actually dropped with DB');
# Restarting a node with wal_level = logical that has existing
# slots must succeed, but decoding from those slots must fail.
$node_master->safe_psql('postgres', 'ALTER SYSTEM SET wal_level = replica');
-is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'logical', 'wal_level is still logical before restart');
+is($node_master->safe_psql('postgres', 'SHOW wal_level'),
+ 'logical', 'wal_level is still logical before restart');
$node_master->restart;
-is($node_master->safe_psql('postgres', 'SHOW wal_level'), 'replica', 'wal_level is replica');
-isnt($node_master->slot('test_slot')->{'catalog_xmin'}, '0',
- 'restored slot catalog_xmin is nonzero');
-is($node_master->psql('postgres', qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]), 3,
+is($node_master->safe_psql('postgres', 'SHOW wal_level'),
+ 'replica', 'wal_level is replica');
+isnt($node_master->slot('test_slot')->{'catalog_xmin'},
+ '0', 'restored slot catalog_xmin is nonzero');
+is( $node_master->psql(
+ 'postgres',
+ qq[SELECT pg_logical_slot_get_changes('test_slot', NULL, NULL);]),
+ 3,
'reading from slot with wal_level < logical fails');
-is($node_master->psql('postgres', q[SELECT pg_drop_replication_slot('test_slot')]), 0,
+is( $node_master->psql(
+ 'postgres', q[SELECT pg_drop_replication_slot('test_slot')]),
+ 0,
'can drop logical slot while wal_level = replica');
is($node_master->slot('test_slot')->{'catalog_xmin'}, '', 'slot was dropped');
# Check that standby1 and standby2 are chosen as sync standbys
# based on their priorities.
test_sync_state(
-$node_master, qq(standby1|1|sync
+ $node_master, qq(standby1|1|sync
standby2|2|sync
standby4|0|async),
-'priority-based sync replication specified by FIRST keyword',
-'FIRST 2(standby1, standby2)');
+ 'priority-based sync replication specified by FIRST keyword',
+ 'FIRST 2(standby1, standby2)');
# Check that all the listed standbys are considered as candidates
# for sync standbys in a quorum-based sync replication.
test_sync_state(
-$node_master, qq(standby1|1|quorum
+ $node_master, qq(standby1|1|quorum
standby2|1|quorum
standby4|0|async),
-'2 quorum and 1 async',
-'ANY 2(standby1, standby2)');
+ '2 quorum and 1 async',
+ 'ANY 2(standby1, standby2)');
# Start Standby3 which will be considered in 'quorum' state.
$node_standby_3->start;
# Check that the setting of 'ANY 2(*)' chooses all standbys as
# candidates for quorum sync standbys.
test_sync_state(
-$node_master, qq(standby1|1|quorum
+ $node_master, qq(standby1|1|quorum
standby2|1|quorum
standby3|1|quorum
standby4|1|quorum),
-'all standbys are considered as candidates for quorum sync standbys',
-'ANY 2(*)');
+ 'all standbys are considered as candidates for quorum sync standbys',
+ 'ANY 2(*)');
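
For reference, the GUC strings driving these scenarios follow the grammar [FIRST | ANY] num_sync (standby_name, ...). A hedged sketch of requesting a quorum of two out of three named standbys:

    $node_master->append_conf('postgresql.conf',
        "synchronous_standby_names = 'ANY 2 (standby1, standby2, standby3)'");
    $node_master->reload;
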
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq{
+$node_master->append_conf(
+ 'postgresql.conf', qq{
fsync = on
wal_log_hints = on
max_prepared_transactions = 5
has_streaming => 1);
$node_standby->start;
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
create table testtab (a int, b char(100));
insert into testtab select generate_series(1,1000), 'foo';
insert into testtab select generate_series(1,1000), 'foo';
});
# Take a lock on the table to prevent following vacuum from truncating it
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
begin;
lock table testtab in row share mode;
prepare transaction 'p1';
# Now do some more insert/deletes, another vacuum to ensure full-page writes
# are done
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
insert into testtab select generate_series(1,1000), 'foo';
delete from testtab where ctid > '(8,0)';
vacuum verbose testtab;
$node_standby->psql('postgres', 'checkpoint');
# Release the lock, vacuum again which should lead to truncation
-$node_master->psql('postgres', qq{
+$node_master->psql(
+ 'postgres', qq{
rollback prepared 'p1';
vacuum verbose testtab;
});
$node_master->psql('postgres', 'checkpoint');
my $until_lsn =
- $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
+ $node_master->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
# Wait long enough for standby to receive and apply all WAL
my $caughtup_query =
- "SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
+ "SELECT '$until_lsn'::pg_lsn <= pg_last_wal_replay_lsn()";
$node_standby->poll_query_until('postgres', $caughtup_query)
- or die "Timed out while waiting for standby to catch up";
+ or die "Timed out while waiting for standby to catch up";
# Promote the standby
$node_standby->promote;
-$node_standby->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_standby->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$node_standby->psql('postgres', 'checkpoint');
$node_standby->restart;
# Insert should work on standby
-is($node_standby->psql('postgres',
- qq{insert into testtab select generate_series(1,1000), 'foo';}),
- 0, 'INSERT succeeds with truncated relation FSM');
+is( $node_standby->psql(
+ 'postgres',
+ qq{insert into testtab select generate_series(1,1000), 'foo';}),
+ 0,
+ 'INSERT succeeds with truncated relation FSM');
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
# Setup slave node
my $node_slave = get_new_node('slave');
-$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
+$node_slave->init_from_backup($node_master, 'master_backup',
+ has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
-my $psql_rc = '';
+my $psql_rc = '';
###############################################################################
# Check that we can commit and abort transaction after soft restart.
# files.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
# transaction using dedicated WAL records.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
# Check that WAL replay can handle several transactions with same GID name.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CHECKPOINT;
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
# while replaying transaction commits.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
COMMIT PREPARED 'xact_009_1';");
$node_master->teardown_node;
$node_master->start;
-$psql_rc = $node_master->psql('postgres', "
+$psql_rc = $node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
# Check that WAL replay will cleanup its shared memory state on running slave.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
INSERT INTO t_009_tbl VALUES (43);
PREPARE TRANSACTION 'xact_009_1';
COMMIT PREPARED 'xact_009_1';");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0',
- "Cleanup of shared memory state on running standby without checkpoint");
+ "Cleanup of shared memory state on running standby without checkpoint");
###############################################################################
# Same as in previous case, but let's force checkpoint on slave between
# prepare and commit to use on-disk twophase files.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
PREPARE TRANSACTION 'xact_009_1';");
$node_slave->psql('postgres', "CHECKPOINT");
$node_master->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0',
- "Cleanup of shared memory state on running standby after checkpoint");
+ "Cleanup of shared memory state on running standby after checkpoint");
###############################################################################
# Check that prepared transactions can be committed on promoted slave.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
PREPARE TRANSACTION 'xact_009_1';");
$node_master->teardown_node;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
$psql_rc = $node_slave->psql('postgres', "COMMIT PREPARED 'xact_009_1'");
# change roles
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
# consistent.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (42);
SAVEPOINT s1;
$node_master->stop;
$node_slave->restart;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from files with master down");
+ "Restore prepared transactions from files with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
# restart while master is down.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
INSERT INTO t_009_tbl VALUES (242);
SAVEPOINT s1;
$node_slave->teardown_node;
$node_slave->start;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '1',
- "Restore prepared transactions from records with master down");
+ "Restore prepared transactions from records with master down");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
# XLOG_STANDBY_LOCK wal record.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
CREATE TABLE t_009_tbl2 (id int);
SAVEPOINT s1;
CHECKPOINT;
COMMIT PREPARED 'xact_009_1';");
-$node_slave->psql('postgres', "SELECT count(*) FROM pg_prepared_xacts",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT count(*) FROM pg_prepared_xacts",
+ stdout => \$psql_out);
is($psql_out, '0', "Replay prepared transaction with DDL");
# Initialize master node
my $node_master = get_new_node('master');
$node_master->init(allows_streaming => 1, has_archiving => 1);
-$node_master->append_conf('postgresql.conf', q[
+$node_master->append_conf(
+ 'postgresql.conf', q[
wal_level = 'logical'
max_replication_slots = 3
max_wal_senders = 2
# the same physical copy trick, so:
$node_master->safe_psql('postgres', 'CREATE DATABASE dropme;');
$node_master->safe_psql('dropme',
-"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');"
-);
+"SELECT pg_create_logical_replication_slot('dropme_slot', 'test_decoding');");
$node_master->safe_psql('postgres', 'CHECKPOINT;');
$node_master, $backup_name,
has_streaming => 1,
has_restoring => 1);
-$node_replica->append_conf(
- 'recovery.conf', q[primary_slot_name = 'phys_slot']);
+$node_replica->append_conf('recovery.conf',
+ q[primary_slot_name = 'phys_slot']);
$node_replica->start;
# If we drop 'dropme' on the master, the standby should drop the
# db and associated slot.
-is($node_master->psql('postgres', 'DROP DATABASE dropme'), 0,
- 'dropped DB with logical slot OK on master');
-$node_master->wait_for_catchup($node_replica, 'replay', $node_master->lsn('insert'));
-is($node_replica->safe_psql('postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']), '',
+is($node_master->psql('postgres', 'DROP DATABASE dropme'),
+ 0, 'dropped DB with logical slot OK on master');
+$node_master->wait_for_catchup($node_replica, 'replay',
+ $node_master->lsn('insert'));
+is( $node_replica->safe_psql(
+ 'postgres', q[SELECT 1 FROM pg_database WHERE datname = 'dropme']),
+ '',
'dropped DB dropme on standby');
-is($node_master->slot('dropme_slot')->{'slot_name'}, undef,
- 'logical slot was actually dropped on standby');
+is($node_master->slot('dropme_slot')->{'slot_name'},
+ undef, 'logical slot was actually dropped on standby');
# Back to testing failover...
$node_master->safe_psql('postgres',
# from the master to make sure its hot_standby_feedback
# has locked in a catalog_xmin on the physical slot, and that
# any xmin is < the catalog_xmin
-$node_master->poll_query_until('postgres', q[
+$node_master->poll_query_until(
+ 'postgres', q[
SELECT catalog_xmin IS NOT NULL
FROM pg_replication_slots
WHERE slot_name = 'phys_slot'
]);
my $phys_slot = $node_master->slot('phys_slot');
-isnt($phys_slot->{'xmin'}, '',
- 'xmin assigned on physical slot of master');
-isnt($phys_slot->{'catalog_xmin'}, '',
- 'catalog_xmin assigned on physical slot of master');
+isnt($phys_slot->{'xmin'}, '', 'xmin assigned on physical slot of master');
+isnt($phys_slot->{'catalog_xmin'},
+ '', 'catalog_xmin assigned on physical slot of master');
+
# Ignore wrap-around here; we're on a new cluster:
-cmp_ok($phys_slot->{'xmin'}, '>=', $phys_slot->{'catalog_xmin'},
- 'xmin on physical slot must not be lower than catalog_xmin');
+cmp_ok(
+ $phys_slot->{'xmin'}, '>=',
+ $phys_slot->{'catalog_xmin'},
+ 'xmin on physical slot must not be lower than catalog_xmin');
$node_master->safe_psql('postgres', 'CHECKPOINT');
BEGIN
table public.decoding: INSERT: blah[text]:'after failover'
COMMIT);
-is($stdout, $final_expected_output_bb, 'decoded expected data from slot before_basebackup');
+is($stdout, $final_expected_output_bb,
+ 'decoded expected data from slot before_basebackup');
is($stderr, '', 'replay from slot before_basebackup produces no stderr');
# So far we've peeked the slots, so when we fetch the same info over
# pg_recvlogical we should get complete results. First, find out the commit lsn
# of the last transaction. There's no max(pg_lsn), so:
-my $endpos = $node_replica->safe_psql('postgres', "SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;");
+my $endpos = $node_replica->safe_psql('postgres',
+"SELECT lsn FROM pg_logical_slot_peek_changes('before_basebackup', NULL, NULL) ORDER BY lsn DESC LIMIT 1;"
+);
# now use the walsender protocol to peek the slot changes and make sure we see
# the same results.
-$stdout = $node_replica->pg_recvlogical_upto('postgres', 'before_basebackup',
- $endpos, 30, 'include-xids' => '0', 'skip-empty-xacts' => '1');
+$stdout = $node_replica->pg_recvlogical_upto(
+ 'postgres', 'before_basebackup',
+ $endpos, 30,
+ 'include-xids' => '0',
+ 'skip-empty-xacts' => '1');
# walsender likes to add a newline
chomp($stdout);
-is($stdout, $final_expected_output_bb, 'got same output from walsender via pg_recvlogical on before_basebackup');
+is($stdout, $final_expected_output_bb,
+ 'got same output from walsender via pg_recvlogical on before_basebackup');
$node_replica->teardown_node();
use TestLib;
use Test::More;
use Config;
-if ($Config{osname} eq 'MSWin32')
+if ($Config{osname} eq 'MSWin32')
{
- # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
+
+ # some Windows Perls at least don't like IPC::Run's start/kill_kill regime.
plan skip_all => "Test fails on Windows perl";
}
else
# an xact to be in-progress when we crash and we need to know
# its xid.
my $tx = IPC::Run::start(
- ['psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d', $node->connstr('postgres')],
- '<', \$stdin, '>', \$stdout, '2>', \$stderr);
+ [ 'psql', '-X', '-qAt', '-v', 'ON_ERROR_STOP=1', '-f', '-', '-d',
+ $node->connstr('postgres') ],
+ '<',
+ \$stdin,
+ '>',
+ \$stdout,
+ '2>',
+ \$stderr);
$stdin .= q[
BEGIN;
CREATE TABLE mine(x integer);
my $xid = $stdout;
chomp($xid);
-is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'in progress', 'own xid is in-progres');
+is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
+ 'in progress', 'own xid is in-progress');
# Crash and restart the postmaster
$node->stop('immediate');
$node->start;
# Make sure we really got a new xid
-cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'), '>', $xid,
- 'new xid after restart is greater');
+cmp_ok($node->safe_psql('postgres', 'SELECT txid_current()'),
+ '>', $xid, 'new xid after restart is greater');
+
# and make sure we show the in-progress xact as aborted
-is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]), 'aborted', 'xid is aborted after crash');
+is($node->safe_psql('postgres', qq[SELECT txid_status('$xid');]),
+ 'aborted', 'xid is aborted after crash');
$tx->kill_kill;
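
txid_status() reports 'committed', 'aborted' or 'in progress' for recent xids, and NULL once an xid is too old to have status retained. A sketch of the call pattern, reusing $node and the captured $xid:

    my $status =
      $node->safe_psql('postgres', qq[SELECT txid_status('$xid');]);
    note "transaction $xid is $status";
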
# Setup master node
my $node_master = get_new_node("master");
$node_master->init(allows_streaming => 1);
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
max_prepared_transactions = 10
log_checkpoints = true
));
# Setup slave node
my $node_slave = get_new_node('slave');
-$node_slave->init_from_backup($node_master, 'master_backup', has_streaming => 1);
+$node_slave->init_from_backup($node_master, 'master_backup',
+ has_streaming => 1);
$node_slave->start;
# Switch to synchronous replication
-$node_master->append_conf('postgresql.conf', qq(
+$node_master->append_conf(
+ 'postgresql.conf', qq(
synchronous_standby_names = '*'
));
$node_master->psql('postgres', "SELECT pg_reload_conf()");
my $psql_out = '';
-my $psql_rc = '';
+my $psql_rc = '';
###############################################################################
# Check that replay will correctly set SUBTRANS and properly advance nextXid
# so that it won't conflict with savepoint xids.
###############################################################################
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
DELETE FROM t_012_tbl;
INSERT INTO t_012_tbl VALUES (43);
$node_master->stop;
$node_master->start;
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
-- here we can get xid of previous savepoint if nextXid
-- wasn't properly advanced
BEGIN;
ROLLBACK;
COMMIT PREPARED 'xact_012_1';");
-$node_master->psql('postgres', "SELECT count(*) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->psql(
+ 'postgres',
+ "SELECT count(*) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '6', "Check nextXid handling for prepared subtransactions");
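
Each SAVEPOINT consumes a subtransaction xid, which is exactly what forces replay to advance nextXid past them. A sketch of a prepared transaction that burns subxids (the GID is a placeholder):

    $node_master->psql(
        'postgres', "
        BEGIN;
        INSERT INTO t_012_tbl VALUES (1);
        SAVEPOINT s1;     -- assigns a subtransaction xid
        INSERT INTO t_012_tbl VALUES (2);
        PREPARE TRANSACTION 'demo_012';");
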
###############################################################################
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(127);
COMMIT;");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
# Function borrowed from src/test/regress/sql/hs_primary_extremes.sql
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
CREATE OR REPLACE FUNCTION hs_subxids (n integer)
RETURNS void
LANGUAGE plpgsql
RETURN;
EXCEPTION WHEN raise_exception THEN NULL; END;
\$\$;");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(127);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "COMMIT PREPARED 'xact_012_1'");
-is($psql_rc, '0', "Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
-
-$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+is($psql_rc, '0',
+"Restore of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
+);
+
+$node_master->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '8128', "Visible");
$node_master->psql('postgres', "DELETE FROM t_012_tbl");
-$node_master->psql('postgres', "
+$node_master->psql(
+ 'postgres', "
BEGIN;
SELECT hs_subxids(201);
PREPARE TRANSACTION 'xact_012_1';");
-$node_master->wait_for_catchup($node_slave, 'replay', $node_master->lsn('insert'));
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_master->wait_for_catchup($node_slave, 'replay',
+ $node_master->lsn('insert'));
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
$node_master->stop;
$node_slave->promote;
-$node_slave->poll_query_until('postgres',
- "SELECT NOT pg_is_in_recovery()")
+$node_slave->poll_query_until('postgres', "SELECT NOT pg_is_in_recovery()")
or die "Timed out while waiting for promotion of standby";
-$node_slave->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+$node_slave->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
# restore state
($node_master, $node_slave) = ($node_slave, $node_master);
$node_slave->enable_streaming($node_master);
-$node_slave->append_conf('recovery.conf', qq(
+$node_slave->append_conf(
+ 'recovery.conf', qq(
recovery_target_timeline='latest'
));
$node_slave->start;
$psql_rc = $node_master->psql('postgres', "ROLLBACK PREPARED 'xact_012_1'");
-is($psql_rc, '0', "Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave");
-
-$node_master->psql('postgres', "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
- stdout => \$psql_out);
+is($psql_rc, '0',
+"Rollback of PGPROC_MAX_CACHED_SUBXIDS+ prepared transaction on promoted slave"
+);
+
+$node_master->psql(
+ 'postgres',
+ "SELECT coalesce(sum(id),-1) FROM t_012_tbl",
+ stdout => \$psql_out);
is($psql_out, '-1', "Not visible");
open my $sslconf, '>', "$pgdata/sslconfig.conf";
close $sslconf;
- # Copy all server certificates and keys, and client root cert, to the data dir
+# Copy all server certificates and keys, and client root cert, to the data dir
copy_files("ssl/server-*.crt", $pgdata);
copy_files("ssl/server-*.key", $pgdata);
chmod(0600, glob "$pgdata/server-*.key") or die $!;
copy_files("ssl/root+client_ca.crt", $pgdata);
- copy_files("ssl/root_ca.crt", $pgdata);
+ copy_files("ssl/root_ca.crt", $pgdata);
copy_files("ssl/root+client.crl", $pgdata);
# Stop and restart server to load new listen_addresses.
{
my $node = $_[0];
my $certfile = $_[1];
- my $cafile = $_[2] || "root+client_ca";
+ my $cafile = $_[2] || "root+client_ca";
my $pgdata = $node->data_dir;
- note "reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
+ note
+ "reloading server with certfile \"$certfile\" and cafile \"$cafile\"";
open my $sslconf, '>', "$pgdata/sslconfig.conf";
print $sslconf "ssl=on\n";
my $serverhost = $_[1];
my $pgdata = $node->data_dir;
- # Only accept SSL connections from localhost. Our tests don't depend on this
- # but seems best to keep it as narrow as possible for security reasons.
- #
- # When connecting to certdb, also check the client certificate.
+ # Only accept SSL connections from localhost. Our tests don't depend on this
+ # but seems best to keep it as narrow as possible for security reasons.
+ #
+ # When connecting to certdb, also check the client certificate.
open my $hba, '>', "$pgdata/pg_hba.conf";
print $hba
"# TYPE DATABASE USER ADDRESS METHOD\n";
"CREATE TABLE tab_rep (a int primary key)");
# Setup structure on subscriber
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_notrep (a int)");
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_ins (a int)");
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_full (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_notrep (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_ins (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_full (a int)");
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_rep (a int primary key)");
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-$node_publisher->safe_psql('postgres',
- "CREATE PUBLICATION tap_pub");
+$node_publisher->safe_psql('postgres', "CREATE PUBLICATION tap_pub");
$node_publisher->safe_psql('postgres',
"CREATE PUBLICATION tap_pub_ins_only WITH (publish = insert)");
$node_publisher->safe_psql('postgres',
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub, tap_pub_ins_only"
+);
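
For orientation, the publication side this subscription attaches to was created with plain CREATE PUBLICATION statements earlier in the file. A minimal matched pair, with names and connection string as placeholders, would be:

    $node_publisher->safe_psql('postgres',
        "CREATE PUBLICATION demo_pub FOR TABLE tab_rep");
    $node_subscriber->safe_psql('postgres',
        "CREATE SUBSCRIPTION demo_sub"
          . " CONNECTION 'host=127.0.0.1 dbname=postgres'"
          . " PUBLICATION demo_pub");
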
# Wait for subscriber to finish initialization
my $caughtup_query =
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1,50)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_ins WHERE a > 20");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_ins SET a = -a");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_ins SET a = -a");
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_rep SELECT generate_series(1,50)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_rep WHERE a > 20");
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_rep SET a = -a");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep WHERE a > 20");
+$node_publisher->safe_psql('postgres', "UPDATE tab_rep SET a = -a");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
is($result, qq(1052|1|1002), 'check replicated inserts on subscriber');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
is($result, qq(20|-20|-1), 'check replicated changes on subscriber');
# insert some duplicate rows
"ALTER TABLE tab_ins REPLICA IDENTITY FULL");
# and do the update
-$node_publisher->safe_psql('postgres',
- "UPDATE tab_full SET a = a * a");
+$node_publisher->safe_psql('postgres', "UPDATE tab_full SET a = a * a");
# Wait for subscription to catch up
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
-is($result, qq(20|1|100), 'update works with REPLICA IDENTITY FULL and duplicate tuples');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
+is($result, qq(20|1|100),
+ 'update works with REPLICA IDENTITY FULL and duplicate tuples');
# check that change of connection string and/or publication list causes
# restart of subscription workers. Not all of these are registered as tests
# as we need to poll for a change, but the test suite will fail nonetheless
# when something goes wrong.
my $oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'");
+"ALTER SUBSCRIPTION tap_sub CONNECTION 'application_name=$appname $publisher_connstr'"
+);
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
$oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)");
+"ALTER SUBSCRIPTION tap_sub SET PUBLICATION tap_pub_ins_only REFRESH WITH (copy_data = false)"
+);
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
$node_publisher->safe_psql('postgres',
"INSERT INTO tab_ins SELECT generate_series(1001,1100)");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_rep");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_rep");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1152|1|1100), 'check replicated inserts after subscription publication change');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1152|1|1100),
+ 'check replicated inserts after subscription publication change');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_rep");
-is($result, qq(20|-20|-1), 'check changes skipped after subscription publication change');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_rep");
+is($result, qq(20|-20|-1),
+ 'check changes skipped after subscription publication change');
# check alter publication (relcache invalidation etc)
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only SET (publish = 'insert, delete')");
$node_publisher->safe_psql('postgres',
"ALTER PUBLICATION tap_pub_ins_only ADD TABLE tab_full");
-$node_publisher->safe_psql('postgres',
- "DELETE FROM tab_ins WHERE a > 0");
+$node_publisher->safe_psql('postgres', "DELETE FROM tab_ins WHERE a > 0");
$node_subscriber->safe_psql('postgres',
- "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)");
-$node_publisher->safe_psql('postgres',
- "INSERT INTO tab_full VALUES(0)");
+ "ALTER SUBSCRIPTION tap_sub REFRESH PUBLICATION WITH (copy_data = false)"
+);
+$node_publisher->safe_psql('postgres', "INSERT INTO tab_full VALUES(0)");
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
# note that data are different on provider and subscriber
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_ins");
-is($result, qq(1052|1|1002), 'check replicated deletes after alter publication');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_ins");
+is($result, qq(1052|1|1002),
+ 'check replicated deletes after alter publication');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(a), max(a) FROM tab_full");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(a), max(a) FROM tab_full");
is($result, qq(21|0|100), 'check replicated insert after alter publication');
# check restart on rename
$oldpid = $node_publisher->safe_psql('postgres',
- "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';");
+ "SELECT pid FROM pg_stat_replication WHERE application_name = '$appname';"
+);
$node_subscriber->safe_psql('postgres',
"ALTER SUBSCRIPTION tap_sub RENAME TO tap_sub_renamed");
$node_publisher->poll_query_until('postgres',
- "SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for apply to restart";
+"SELECT pid != $oldpid FROM pg_stat_replication WHERE application_name = '$appname';"
+) or die "Timed out while waiting for apply to restart";
# check all the cleanup
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub_renamed");
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'check subscription was dropped on subscriber');
-$result =
- $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription_rel");
-is($result, qq(0), 'check subscription relation status was dropped on subscriber');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription_rel");
+is($result, qq(0),
+ 'check subscription relation status was dropped on subscriber');
-$result =
- $node_publisher->safe_psql('postgres', "SELECT count(*) FROM pg_replication_slots");
+$result = $node_publisher->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_slots");
is($result, qq(0), 'check replication slot was dropped on publisher');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_replication_origin");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_replication_origin");
is($result, qq(0), 'check replication origin was dropped on subscriber');
$node_subscriber->stop('fast');
$node_subscriber->start;
# Create some preexisting content on publisher
-my $ddl = qq(
+my $ddl = qq(
CREATE EXTENSION hstore WITH SCHEMA public;
CREATE TABLE public.tst_one_array (
a INTEGER PRIMARY KEY,
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (slot_name = tap_sub_slot)"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
or die "Timed out while waiting for subscriber to synchronize data";
# Insert initial test data
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
-- test_tbl_one_array_col
INSERT INTO tst_one_array (a, b) VALUES
(1, '{1, 2, 3}'),
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-my $result = $node_subscriber->safe_psql('postgres', qq(
+my $result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '1|{1,2,3}
+is( $result, '1|{1,2,3}
2|{2,3,1}
3|{3,2,1}
4|{4,3,2}
2|"zzz"=>"foo"
3|"123"=>"321"
4|"yellow horse"=>"moaned"',
-'check replicated inserts on subscriber');
+ 'check replicated inserts on subscriber');
# Run batch of updates
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
UPDATE tst_one_array SET b = '{4, 5, 6}' WHERE a = 1;
UPDATE tst_one_array SET b = '{4, 5, 6, 1}' WHERE a > 3;
UPDATE tst_arrays SET b = '{"1a", "2b", "3c"}', c = '{1.0, 2.0, 3.0}', d = '{"1 day 1 second", "2 days 2 seconds", "3 days 3 second"}' WHERE a = '{1, 2, 3}';
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-$result = $node_subscriber->safe_psql('postgres', qq(
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '1|{4,5,6}
+is( $result, '1|{4,5,6}
2|{2,3,1}
3|{3,2,1}
4|{4,5,6,1}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
-'check replicated updates on subscriber');
+ 'check replicated updates on subscriber');
# Run batch of deletes
-$node_publisher->safe_psql('postgres', qq(
+$node_publisher->safe_psql(
+ 'postgres', qq(
DELETE FROM tst_one_array WHERE a = 1;
DELETE FROM tst_one_array WHERE b = '{2, 3, 1}';
DELETE FROM tst_arrays WHERE a = '{1, 2, 3}';
or die "Timed out while waiting for subscriber to catch up";
# Check the data on subscriber
-$result = $node_subscriber->safe_psql('postgres', qq(
+$result = $node_subscriber->safe_psql(
+ 'postgres', qq(
SET timezone = '+2';
SELECT a, b FROM tst_one_array ORDER BY a;
SELECT a, b, c, d FROM tst_arrays ORDER BY a;
SELECT a, b FROM tst_hstore ORDER BY a;
));
-is($result, '3|{3,2,1}
+is( $result, '3|{3,2,1}
4|{4,5,6,1}
5|{4,5,6,1}
{3,1,2}|{c,a,b}|{3.3,1.1,2.2}|{"3 years","1 year","2 years"}
2|"updated"=>"value"
3|"also"=>"updated"
4|"yellow horse"=>"moaned"',
-'check replicated deletes on subscriber');
+ 'check replicated deletes on subscriber');
$node_subscriber->stop('fast');
$node_publisher->stop('fast');
$node_publisher->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_publisher->safe_psql('postgres',
- "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
+"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
+);
# Setup structure on subscriber
$node_subscriber->safe_psql('postgres',
"CREATE TABLE tab_fk (bid int PRIMARY KEY);");
$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));");
+"CREATE TABLE tab_fk_ref (id int PRIMARY KEY, bid int REFERENCES tab_fk (bid));"
+);
# Setup logical replication
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
or die "Timed out while waiting for subscriber to catch up";
# Check data on subscriber
-my $result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
+my $result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk;");
is($result, qq(1|1|1), 'check replicated tab_fk inserts on subscriber');
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(1|1|1), 'check replicated tab_fk_ref inserts on subscriber');
# Drop the fk on publisher
-$node_publisher->safe_psql('postgres',
- "DROP TABLE tab_fk CASCADE;");
+$node_publisher->safe_psql('postgres', "DROP TABLE tab_fk CASCADE;");
# Insert data
$node_publisher->safe_psql('postgres',
or die "Timed out while waiting for subscriber to catch up";
# FK is not enforced on subscriber
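# (the apply worker runs with session_replication_role = replica, which
# disables the triggers that implement FK checks)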
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check FK ignored on subscriber');
# Add replica trigger
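# (ENABLE REPLICA triggers fire only when session_replication_role is
# 'replica', i.e. during apply)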
-$node_subscriber->safe_psql('postgres', qq{
+$node_subscriber->safe_psql(
+ 'postgres', qq{
CREATE FUNCTION filter_basic_dml_fn() RETURNS TRIGGER AS \$\$
BEGIN
IF (TG_OP = 'INSERT') THEN
or die "Timed out while waiting for subscriber to catch up";
# The row should be skipped on subscriber
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*), min(bid), max(bid) FROM tab_fk_ref;");
is($result, qq(2|1|2), 'check replica trigger applied on subscriber');
$node_subscriber->stop('fast');
# Create subscriber node
my $node_subscriber = get_new_node('subscriber');
$node_subscriber->init(allows_streaming => 'logical');
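# use a tiny wal_retrieve_retry_interval so that failed sync/apply
# workers are relaunched almost immediately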
-$node_subscriber->append_conf('postgresql.conf', "wal_retrieve_retry_interval = 1ms");
+$node_subscriber->append_conf('postgresql.conf',
+ "wal_retrieve_retry_interval = 1ms");
$node_subscriber->start;
# Create some preexisting content on publisher
my $appname = 'tap_sub';
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# Wait for subscriber to finish initialization
my $caughtup_query =
# recreate the subscription; it will try to do the initial copy
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# but it will be stuck on data copy, as the copy will fail on a constraint
-my $started_query =
-"SELECT srsubstate = 'd' FROM pg_subscription_rel;";
+my $started_query = "SELECT srsubstate = 'd' FROM pg_subscription_rel;";
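# (srsubstate 'd' means the initial data copy for the relation is still
# in progress)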
$node_subscriber->poll_query_until('postgres', $started_query)
or die "Timed out while waiting for subscriber to start sync";
# remove the conflicting data
-$node_subscriber->safe_psql('postgres',
- "DELETE FROM tab_rep;");
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# wait for sync to finish this time
$node_subscriber->poll_query_until('postgres', $synced_query)
# now check another subscription for the same node pair
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)");
+"CREATE SUBSCRIPTION tap_sub2 CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub WITH (copy_data = false)"
+);
# wait for it to start
-$node_subscriber->poll_query_until('postgres', "SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL")
- or die "Timed out while waiting for subscriber to start";
+$node_subscriber->poll_query_until('postgres',
+"SELECT pid IS NOT NULL FROM pg_stat_subscription WHERE subname = 'tap_sub2' AND relid IS NULL"
+) or die "Timed out while waiting for subscriber to start";
# and drop both subscriptions
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub2");
# check subscriptions are removed
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM pg_subscription");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM pg_subscription");
is($result, qq(0), 'second and third sub are dropped');
# remove the conflicting data
-$node_subscriber->safe_psql('postgres',
- "DELETE FROM tab_rep;");
+$node_subscriber->safe_psql('postgres', "DELETE FROM tab_rep;");
# recreate the subscription again
$node_subscriber->safe_psql('postgres',
- "CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub");
+"CREATE SUBSCRIPTION tap_sub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION tap_pub"
+);
# and wait for data sync to finish again
$node_subscriber->poll_query_until('postgres', $synced_query)
is($result, qq(20), 'initial data synced for fourth sub');
# add new table on subscriber
-$node_subscriber->safe_psql('postgres',
- "CREATE TABLE tab_rep_next (a int)");
+$node_subscriber->safe_psql('postgres', "CREATE TABLE tab_rep_next (a int)");
# setup structure with existing data on publisher
$node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
is($result, qq(0), 'no data for table added after subscription initialized');
# ask for data sync
$node_subscriber->poll_query_until('postgres', $synced_query)
or die "Timed out while waiting for subscriber to synchronize data";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
-is($result, qq(10), 'data for table added after subscription initialized are now synced');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(10),
+ 'data for table added after subscription initialized are now synced');
# Add some data
$node_publisher->safe_psql('postgres',
$node_publisher->poll_query_until('postgres', $caughtup_query)
or die "Timed out while waiting for subscriber to catch up";
-$result =
- $node_subscriber->safe_psql('postgres', "SELECT count(*) FROM tab_rep_next");
-is($result, qq(20), 'changes for table added after subscription initialized replicated');
+$result = $node_subscriber->safe_psql('postgres',
+ "SELECT count(*) FROM tab_rep_next");
+is($result, qq(20),
+ 'changes for table added after subscription initialized replicated');
$node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
my ($node, $appname) = @_;
$node->poll_query_until('postgres',
- "SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';")
- or die "Timed out while waiting for subscriber to catch up";
+"SELECT pg_current_wal_lsn() <= replay_lsn FROM pg_stat_replication WHERE application_name = '$appname';"
+ ) or die "Timed out while waiting for subscriber to catch up";
}
my $node_publisher = get_new_node('publisher');
-$node_publisher->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=UTF8']);
+$node_publisher->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=UTF8' ]);
$node_publisher->start;
my $node_subscriber = get_new_node('subscriber');
-$node_subscriber->init(allows_streaming => 'logical', extra => ['--locale=C', '--encoding=LATIN1']);
+$node_subscriber->init(
+ allows_streaming => 'logical',
+ extra => [ '--locale=C', '--encoding=LATIN1' ]);
$node_subscriber->start;
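# the publisher database is UTF8 and the subscriber LATIN1, so replicated
# text data has to be converted between the two encodings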
my $ddl = "CREATE TABLE test1 (a int, b text);";
$node_subscriber->safe_psql('postgres', $ddl);
my $publisher_connstr = $node_publisher->connstr . ' dbname=postgres';
-my $appname = 'encoding_test';
+my $appname = 'encoding_test';
-$node_publisher->safe_psql('postgres', "CREATE PUBLICATION mypub FOR ALL TABLES;");
-$node_subscriber->safe_psql('postgres', "CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;");
+$node_publisher->safe_psql('postgres',
+ "CREATE PUBLICATION mypub FOR ALL TABLES;");
+$node_subscriber->safe_psql('postgres',
+"CREATE SUBSCRIPTION mysub CONNECTION '$publisher_connstr application_name=$appname' PUBLICATION mypub;"
+);
wait_for_caught_up($node_publisher, $appname);
-$node_publisher->safe_psql('postgres', q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
+$node_publisher->safe_psql('postgres',
+ q{INSERT INTO test1 VALUES (1, E'Mot\xc3\xb6rhead')}); # hand-rolled UTF-8
wait_for_caught_up($node_publisher, $appname);
-is($node_subscriber->safe_psql('postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}), # LATIN1
- qq(1),
- 'data replicated to subscriber');
+is( $node_subscriber->safe_psql(
+ 'postgres', q{SELECT a FROM test1 WHERE b = E'Mot\xf6rhead'}
+ ), # LATIN1
+ qq(1),
+ 'data replicated to subscriber');
$node_subscriber->stop;
$node_publisher->stop;
exit 0 if $1 >= 36;
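# Flex 2.5.36 and later emit code that does not need this workaround.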
# Apply the desired patch.
-$ccode =~ s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/
+$ccode =~
+s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This var may be unused depending upon options. \*/
.*?)
return yy_is_jam \? 0 : yy_current_state;
|$1
my $insttype;
my @client_contribs = ('oid2name', 'pgbench', 'vacuumlo');
my @client_program_files = (
- 'clusterdb', 'createdb', 'createuser',
- 'dropdb', 'dropuser', 'ecpg',
- 'libecpg', 'libecpg_compat', 'libpgtypes', 'libpq',
- 'pg_basebackup', 'pg_config', 'pg_dump', 'pg_dumpall',
- 'pg_isready', 'pg_receivewal', 'pg_recvlogical', 'pg_restore',
- 'psql', 'reindexdb', 'vacuumdb', @client_contribs);
+ 'clusterdb', 'createdb', 'createuser', 'dropdb',
+ 'dropuser', 'ecpg', 'libecpg', 'libecpg_compat',
+ 'libpgtypes', 'libpq', 'pg_basebackup', 'pg_config',
+ 'pg_dump', 'pg_dumpall', 'pg_isready', 'pg_receivewal',
+ 'pg_recvlogical', 'pg_restore', 'psql', 'reindexdb',
+ 'vacuumdb', @client_contribs);
sub lcopy
{
print "Generating timezone files...";
- my @args = ("$conf/zic/zic", '-d', "$target/share/timezone",
- '-p', "$posixrules");
+ my @args =
+ ("$conf/zic/zic", '-d', "$target/share/timezone", '-p', "$posixrules");
foreach (@tzfiles)
{
my $tzfile = $_;
my @contrib_uselibpgport = ('oid2name', 'pg_standby', 'vacuumlo');
my @contrib_uselibpgcommon = ('oid2name', 'pg_standby', 'vacuumlo');
my $contrib_extralibs = undef;
-my $contrib_extraincludes =
- { 'dblink' => ['src/backend'] };
+my $contrib_extraincludes = { 'dblink' => ['src/backend'] };
my $contrib_extrasource = {
'cube' => [ 'contrib/cube/cubescan.l', 'contrib/cube/cubeparse.y' ],
'seg' => [ 'contrib/seg/segscan.l', 'contrib/seg/segparse.y' ], };
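	# (the .l and .y files listed here must be run through flex/bison
	# before compilation)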
my @frontend_uselibpgport = (
'pg_archivecleanup', 'pg_test_fsync',
'pg_test_timing', 'pg_upgrade',
- 'pg_waldump', 'pgbench');
+ 'pg_waldump', 'pgbench');
my @frontend_uselibpgcommon = (
'pg_archivecleanup', 'pg_test_fsync',
'pg_test_timing', 'pg_upgrade',
- 'pg_waldump', 'pgbench');
+ 'pg_waldump', 'pgbench');
my $frontend_extralibs = {
'initdb' => ['ws2_32.lib'],
'pg_restore' => ['ws2_32.lib'],
'pgbench' =>
[ 'src/bin/pgbench/exprscan.l', 'src/bin/pgbench/exprparse.y' ] };
my @frontend_excludes = (
- 'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
+ 'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
'pg_waldump', 'scripts');
sub mkvcbuild
}
}
die "Unable to find $solution->{options}->{tcl}/lib/tcl<version>.lib"
- unless $found;
+ unless $found;
}
$libpq = $solution->AddProject('libpq', 'dll', 'interfaces',
$libpqwalreceiver->AddIncludeDir('src/interfaces/libpq');
$libpqwalreceiver->AddReference($postgres, $libpq);
- my $pgoutput = $solution->AddProject(
- 'pgoutput', 'dll', '',
+ my $pgoutput = $solution->AddProject('pgoutput', 'dll', '',
'src/backend/replication/pgoutput');
$pgoutput->AddReference($postgres);
'hstore_plpython' . $pymajorver, 'contrib/hstore_plpython',
'plpython' . $pymajorver, 'src/pl/plpython',
'hstore', 'contrib/hstore');
- $hstore_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
+ $hstore_plpython->AddDefine(
+ 'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
my $ltree_plpython = AddTransformModule(
'ltree_plpython' . $pymajorver, 'contrib/ltree_plpython',
'plpython' . $pymajorver, 'src/pl/plpython',
'ltree', 'contrib/ltree');
- $ltree_plpython->AddDefine('PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
+ $ltree_plpython->AddDefine(
+ 'PLPYTHON_LIBNAME="plpython' . $pymajorver . '"');
}
if ($solution->{options}->{perl})
$plperl->AddReference($postgres);
my $perl_path = $solution->{options}->{perl} . '\lib\CORE\perl*.lib';
my @perl_libs =
- grep { /perl\d+.lib$/ }
- glob($perl_path);
+ grep { /perl\d+.lib$/ } glob($perl_path);
if (@perl_libs == 1)
{
$plperl->AddLibrary($perl_libs[0]);
}
else
{
- die "could not identify perl library version matching pattern $perl_path\n";
+ die
+"could not identify perl library version matching pattern $perl_path\n";
}
# Add transform module dependent on plperl
{
s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"};
s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
- s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"};
+s{PG_VERSION_STR "[^"]+"}{PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " CppAsString2(_MSC_VER) ", $bits-bit"};
print $o $_;
}
print $o "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
print $o "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
print $o "#define ENABLE_NLS 1\n" if ($self->{options}->{nls});
- print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
+ print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize},
+ "\n";
print $o "#define RELSEG_SIZE ",
(1024 / $self->{options}->{blocksize}) *
$self->{options}->{segsize} *
'src/include/utils/fmgrprotos.h',
'src/backend/utils/fmgrprotos.h'))
{
- copyFile('src/backend/utils/fmgrprotos.h',
+ copyFile(
+ 'src/backend/utils/fmgrprotos.h',
'src/include/utils/fmgrprotos.h');
}
my $self = shift;
my $cfg = '--enable-thread-safety';
- $cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
+ $cfg .= ' --enable-cassert' if ($self->{options}->{asserts});
$cfg .= ' --enable-nls' if ($self->{options}->{nls});
$cfg .= ' --enable-tap-tests' if ($self->{options}->{tap_tests});
$cfg .= ' --with-ldap' if ($self->{options}->{ldap});
# check what sort of build we are doing
-my $bconf = $ENV{CONFIG} || "Release";
+my $bconf = $ENV{CONFIG} || "Release";
my $msbflags = $ENV{MSBFLAGS} || "";
-my $buildwhat = $ARGV[1] || "";
+my $buildwhat = $ARGV[1] || "";
if (uc($ARGV[0]) eq 'DEBUG')
{
$bconf = "Debug";
if ($buildwhat and $vcver >= 10.00)
{
system(
- "msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf"
+"msbuild $buildwhat.vcxproj /verbosity:normal $msbflags /p:Configuration=$bconf"
);
}
elsif ($buildwhat)
}
else
{
- system("msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf");
+ system(
+"msbuild pgsql.sln /verbosity:normal $msbflags /p:Configuration=$bconf");
}
# report status
our $config = {
asserts => 0, # --enable-cassert
- # float4byval=>1, # --disable-float4-byval, on by default
+ # float4byval=>1, # --disable-float4-byval, on by default
# float8byval=> $platformbits == 64, # --disable-float8-byval,
# off by default on 32 bit platforms, on by default on 64 bit platforms
my %def = ();
-while (<$ARGV[0]/*.obj>) ## no critic (RequireGlobFunction);
+while (<$ARGV[0]/*.obj>) ## no critic (RequireGlobFunction);
{
my $objfile = $_;
my $symfile = $objfile;
system("flex $flexflags -o$output $input");
if ($? == 0)
{
+
# Check for "%option reentrant" in .l file.
my $lfile;
open($lfile, '<', $input) || die "opening $input for reading: $!";
close($lfile);
if ($lcode =~ /\%option\sreentrant/)
{
+
# Reentrant scanners usually need a fix to prevent
# "unused variable" warnings with older flex versions.
system("perl src\\tools\\fix-old-flex-code.pl $output");
}
else
{
+
# For non-reentrant scanners we need to fix up the yywrap
# macro definition to keep the MS compiler happy.
# For reentrant scanners (like the core scanner) we do not
chdir $dir;
my @flags;
- @flags = split(/\s+/,$ENV{PROVE_FLAGS}) if exists $ENV{PROVE_FLAGS};
+ @flags = split(/\s+/, $ENV{PROVE_FLAGS}) if exists $ENV{PROVE_FLAGS};
my @args = ("prove", @flags, "t/*.pl");
$m =~ s{\\\r?\n}{}g;
if ($m =~ /^\s*REGRESS_OPTS\s*\+?=(.*)/m)
{
+
# Substitute known Makefile variables, then ignore options that retain
# an unhandled variable reference. Ignore anything that isn't an
# option starting with "--".
sub run_build
{
- eval "use LWP::Simple;"; ## no critic (ProhibitStringyEval);
+ eval "use LWP::Simple;"; ## no critic (ProhibitStringyEval);
my $code_base = shift || '.';
my $save_dir = getcwd();