* Modifications - 30-Oct-2000 - pjw@rhyme.com.au
* Added {Start,End}RestoreBlobs to allow extended TX during BLOB restore.
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ * - strdup() the current user just in case it's deallocated from its TOC
+ * entry. Should *never* happen, but that's what they said about the
+ * Titanic...
+ *
+ * - Check results of IO routines more carefully.
+ *
*-------------------------------------------------------------------------
*/
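The IO checks added below all share one shape: capture the routine's return value, compare it with the expected count, and bail out through die_horribly(). A minimal sketch of that shape, using a hypothetical checked_fwrite() helper (not part of the patch, which inlines the check at each call site):

static void
checked_fwrite(ArchiveHandle *AH, const void *buf, size_t len, FILE *fp)
{
	size_t	res = fwrite(buf, 1, len, fp);	/* fwrite returns the member count */

	if (res != len)
		die_horribly(AH, "%s: write error (%d != %d)\n",
					 progname, (int) res, (int) len);
}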
/* Public */
void CloseArchive(Archive* AHX)
{
+ int res = 0;
ArchiveHandle* AH = (ArchiveHandle*)AHX;
(*AH->ClosePtr)(AH);
/* Close the output */
if (AH->gzOut)
- GZCLOSE(AH->OF);
+ res = GZCLOSE(AH->OF);
else if (AH->OF != stdout)
- fclose(AH->OF);
+ res = fclose(AH->OF);
+
+ if (res != 0)
+ die_horribly(AH, "%s: could not close the output file in CloseArchive\n", progname);
}
/* Public */
/* Setup the file */
fh = fopen(ropt->tocFile, PG_BINARY_R);
- if (!fh)
- die_horribly(AH, "%s: could not open TOC file\n", progname);
+ if (!fh)
+ die_horribly(AH, "%s: could not open TOC file\n", progname);
while (fgets(buf, 1024, fh) != NULL)
{
tePrev = te;
}
- fclose(fh);
+ if (fclose(fh) != 0)
+ die_horribly(AH, "%s: could not close TOC file\n", progname);
}
/**********************
#ifdef HAVE_LIBZ
if (compression != 0)
{
- sprintf(fmode, "wb%d", compression);
- if (fn) {
- AH->OF = gzdopen(dup(fn), fmode); /* Don't use PG_BINARY_x since this is zlib */
- } else {
- AH->OF = gzopen(filename, fmode);
- }
- AH->gzOut = 1;
+ sprintf(fmode, "wb%d", compression);
+ if (fn) {
+ AH->OF = gzdopen(dup(fn), fmode); /* Don't use PG_BINARY_x since this is zlib */
+ } else {
+ AH->OF = gzopen(filename, fmode);
+ }
+ AH->gzOut = 1;
} else { /* Use fopen */
#endif
- if (fn) {
- AH->OF = fdopen(dup(fn), PG_BINARY_W);
- } else {
- AH->OF = fopen(filename, PG_BINARY_W);
- }
- AH->gzOut = 0;
+ if (fn) {
+ AH->OF = fdopen(dup(fn), PG_BINARY_W);
+ } else {
+ AH->OF = fopen(filename, PG_BINARY_W);
+ }
+ AH->gzOut = 0;
#ifdef HAVE_LIBZ
}
#endif
+ if (!AH->OF)
+ die_horribly(AH, "%s: could not set output\n", progname);
+
return sav;
}
void ResetOutput(ArchiveHandle* AH, OutputContext sav)
{
+ int res;
+
if (AH->gzOut)
- GZCLOSE(AH->OF);
+ res = GZCLOSE(AH->OF);
else
- fclose(AH->OF);
+ res = fclose(AH->OF);
+
+ if (res != 0)
+ die_horribly(AH, "%s: could not reset the output file\n", progname);
AH->gzOut = sav.gzOut;
AH->OF = sav.OF;
return res;
}
else if (AH->gzOut)
- return GZWRITE((void*)ptr, size, nmemb, AH->OF);
+ {
+ res = GZWRITE((void*)ptr, size, nmemb, AH->OF);
+ if (res != (nmemb * size))
+ die_horribly(AH, "%s: could not write to archive\n", progname);
+ return res;
+ }
else if (AH->CustomOutPtr)
- return AH->CustomOutPtr(AH, ptr, size * nmemb);
+ {
+ res = AH->CustomOutPtr(AH, ptr, size * nmemb);
+ if (res != (nmemb * size))
+ die_horribly(AH, "%s: could not write to custom output routine\n", progname);
+ return res;
+ }
else
{
/*
* then send it to the DB.
*/
if (RestoringToDB(AH))
- return ExecuteSqlCommandBuf(AH, (void*)ptr, size*nmemb);
+ return ExecuteSqlCommandBuf(AH, (void*)ptr, size*nmemb); /* Always 1, currently */
else
- return fwrite((void*)ptr, size, nmemb, AH->OF);
+ {
+ res = fwrite((void*)ptr, size, nmemb, AH->OF);
+ if (res != nmemb)
+ die_horribly(AH, "%s: could not write to output file (%d != %d)\n", progname, res, nmemb);
+ return res;
+ }
}
}
/* Close the file */
if (wantClose)
- fclose(fh);
+ if (fclose(fh) != 0)
+ die_horribly(AH, "%s: could not close the input file after reading header\n", progname);
return AH->format;
}
AH->fSpec = NULL;
}
- AH->currUser = "";
+ AH->currUser = strdup(""); /* So it's valid, but we can free() it later if necessary */
AH->toc = (TocEntry*)calloc(1, sizeof(TocEntry));
if (!AH->toc)
if (AH->WriteExtraTocPtr) {
(*AH->WriteExtraTocPtr)(AH, te);
}
- te = te->next;
+ te = te->next;
}
}
{
ahprintf(AH, "\\connect %s %s\n", dbname, user);
}
- AH->currUser = user;
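+ /* Take a private copy so that freeing the TOC entry later cannot leave currUser dangling. */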
+ if (AH->currUser)
+ {
+ free(AH->currUser);
+ }
+
+ AH->currUser = strdup(user);
}
}
#define GZREAD(p, s, n, fh) gzread(fh, p, n * s)
#else
#define GZCLOSE(fh) fclose(fh)
-#define GZWRITE(p, s, n, fh) fwrite(p, s, n, fh)
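+/*
+ * fwrite() returns a member count while gzwrite() returns a byte count;
+ * scale the non-zlib fallback by s so that callers can compare the result
+ * of GZWRITE against size * nmemb in either build.
+ */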
+#define GZWRITE(p, s, n, fh) (fwrite(p, s, n, fh) * s)
#define GZREAD(p, s, n, fh) fread(p, s, n, fh)
#define Z_DEFAULT_COMPRESSION -1
#define K_VERS_MAJOR 1
#define K_VERS_MINOR 4
-#define K_VERS_REV 22
+#define K_VERS_REV 23
/* Data block types */
#define BLK_DATA 1
*
* Initial version.
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check results of IO routines more carefully.
+ *
*-------------------------------------------------------------------------
*/
AH->FH = stdout;
}
- if (!AH)
+ if (!AH->FH)
die_horribly(AH, "%s: unable to open archive file %s",progname, AH->fSpec);
ctx->hasSeek = (fseek(AH->FH, 0, SEEK_CUR) == 0);
} else {
AH->FH = stdin;
}
- if (!AH)
+ if (!AH->FH)
die_horribly(AH, "%s: unable to open archive file %s",progname, AH->fSpec);
ctx->hasSeek = (fseek(AH->FH, 0, SEEK_CUR) == 0);
res = fputc(i, AH->FH);
if (res != EOF) {
ctx->filePos += 1;
+ } else {
+ die_horribly(AH, "%s: could not write byte\n", progname);
}
return res;
}
lclContext* ctx = (lclContext*)AH->formatData;
int res;
res = fwrite(buf, 1, len, AH->FH);
+
+ if (res != len)
+ die_horribly(AH, "%s: write error in _WriteBuf (%d != %d)\n", progname, res, len);
+
ctx->filePos += res;
return res;
}
}
}
- fclose(AH->FH);
+ if (fclose(AH->FH) != 0)
+ die_horribly(AH, "%s: could not close archive file\n", progname);
+
AH->FH = NULL;
}
if (zp->avail_out < zlibOutSize) {
/* printf("Wrote %d byte deflated chunk\n", zlibOutSize - zp->avail_out); */
WriteInt(AH, zlibOutSize - zp->avail_out);
- fwrite(out, 1, zlibOutSize - zp->avail_out, AH->FH);
+ if (fwrite(out, 1, zlibOutSize - zp->avail_out, AH->FH) != (zlibOutSize - zp->avail_out))
+ die_horribly(AH, "%s: could not write compressed chunk\n", progname);
ctx->filePos += zlibOutSize - zp->avail_out;
}
zp->next_out = out;
if (zp->avail_in > 0)
{
WriteInt(AH, zp->avail_in);
- fwrite(zp->next_in, 1, zp->avail_in, AH->FH);
+ if (fwrite(zp->next_in, 1, zp->avail_in, AH->FH) != zp->avail_in)
+ die_horribly(AH, "%s: could not write uncompressed chunk\n", progname);
ctx->filePos += zp->avail_in;
zp->avail_in = 0;
} else {
/*-------------------------------------------------------------------------
*
+ * pg_backup_db.c
+ *
+ * Implements the basic DB functions used by the archiver.
+ *
+ * IDENTIFICATION
+ *
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check results of PQ routines more carefully.
*
*-------------------------------------------------------------------------
*/
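PQputline() and PQendcopy() both return zero on success, which is what the new tests rely on. A minimal sketch of a checked COPY-in sequence (the row data here is purely illustrative):

	if (PQputline(AH->connection, "42\tsome data\n") != 0)
		die_horribly(AH, "%s: error returned by PQputline\n", progname);
	if (PQputline(AH->connection, "\\.\n") != 0)	/* end-of-data marker */
		die_horribly(AH, "%s: error returned by PQputline\n", progname);
	if (PQendcopy(AH->connection) != 0)
		die_horribly(AH, "%s: error returned by PQendcopy\n", progname);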
/* fprintf(stderr, "Sending '%s' via COPY (at end = %d)\n\n", AH->pgCopyBuf->data, isEnd); */
- PQputline(AH->connection, AH->pgCopyBuf->data);
+ if (PQputline(AH->connection, AH->pgCopyBuf->data) != 0)
+ die_horribly(AH, "%s: error returned by PQputline\n", progname);
resetPQExpBuffer(AH->pgCopyBuf);
/* fprintf(stderr, "Buffer is '%s'\n", AH->pgCopyBuf->data); */
if(isEnd) {
- PQendcopy(AH->connection);
+ if (PQendcopy(AH->connection) != 0)
+ die_horribly(AH, "%s: error returned by PQendcopy\n", progname);
+
AH->pgCopyIn = 0;
break;
}
*
* Initial version.
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check results of IO routines more carefully.
+ *
*-------------------------------------------------------------------------
*/
} else {
AH->FH = stdout;
}
+
+ if (AH->FH == NULL)
+ die_horribly(NULL, "%s: Could not open output file\n", progname);
+
ctx->hasSeek = (fseek(AH->FH, 0, SEEK_CUR) == 0);
if (AH->compression < 0 || AH->compression > 9) {
} else {
AH->FH = stdin;
}
+
+ if (AH->FH == NULL)
+ die_horribly(NULL, "%s: Could not open input file\n", progname);
+
ctx->hasSeek = (fseek(AH->FH, 0, SEEK_CUR) == 0);
ReadHead(AH);
#else
tctx->FH = fopen(tctx->filename, PG_BINARY_W);
#endif
+
+ if (tctx->FH == NULL)
+ die_horribly(AH, "%s: Could not open data file for output\n", progname);
+
}
static int _WriteData(ArchiveHandle* AH, const void* data, int dLen)
AH->FH = fopen(filename,PG_BINARY_R);
#endif
+ if (AH->FH == NULL)
+ die_horribly(AH, "%s: Could not open data file for input\n", progname);
+
while ( (cnt = GZREAD(buf, 1, 4095, AH->FH)) > 0) {
buf[cnt] = '\0';
ahwrite(buf, 1, cnt, AH);
ctx->blobToc = fopen("blobs.toc", PG_BINARY_R);
+ if (ctx->blobToc == NULL)
+ die_horribly(AH, "%s: Could not open BLOB TOC for input\n", progname);
+
_getBlobTocEntry(AH, &oid, fname);
while(oid != 0)
static int _WriteByte(ArchiveHandle* AH, const int i)
{
lclContext* ctx = (lclContext*)AH->formatData;
- int res;
- res = fputc(i, AH->FH);
- if (res != EOF) {
- ctx->filePos += 1;
- }
- return res;
+ if (fputc(i, AH->FH) == EOF)
+ die_horribly(AH, "%s: could not write byte\n", progname);
+
+ ctx->filePos += 1;
+
+ return 1;
}
static int _ReadByte(ArchiveHandle* AH)
lclContext* ctx = (lclContext*)AH->formatData;
int res;
res = fwrite(buf, 1, len, AH->FH);
+ if (res != len)
+ die_horribly(AH, "%s: write error in _WriteBuf (%d != %d)\n", progname, res, len);
+
ctx->filePos += res;
return res;
}
sprintf(fname, "blobs.toc");
ctx->blobToc = fopen(fname, PG_BINARY_W);
-
+
+ if (ctx->blobToc == NULL)
+ die_horribly(AH, "%s: could not open BLOB TOC for output\n", progname);
+
}
/*
tctx->FH = fopen(fname, PG_BINARY_W);
#endif
+ if (tctx->FH == NULL)
+ die_horribly(AH, "%s: Could not open BLOB file\n", progname);
}
/*
*
* Initial version.
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check results of IO routines more carefully.
+ *
*-------------------------------------------------------------------------
*/
*
* Initial version.
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check results of IO routines more carefully.
+ *
*-------------------------------------------------------------------------
*/
} else {
ctx->tarFH = stdout;
}
+
+ if (ctx->tarFH == NULL)
+ die_horribly(NULL, "%s: Could not open TOC file for output.\n", progname);
+
ctx->tarFHpos = 0;
/* Make unbuffered since we will dup() it, and the buffers screw each other */
ctx->tarFH = stdin;
}
+ if (ctx->tarFH == NULL)
+ die_horribly(NULL, "%s: Could not open TOC file for input\n", progname);
+
/* Make unbuffered since we will dup() it, and the buffers screw each other */
/* setvbuf(ctx->tarFH, NULL, _IONBF, 0); */
tm->tmpFH = tmpfile();
+ if (tm->tmpFH == NULL)
+ die_horribly(AH, "%s: could not create temporary file.\n", progname);
+
#ifdef HAVE_LIBZ
if (AH->compression != 0)
{
sprintf(fmode, "wb%d", AH->compression);
tm->zFH = gzdopen(dup(fileno(tm->tmpFH)), fmode);
+ if (tm->zFH == NULL)
+ die_horribly(AH, "%s: could not gzdopen temp file.\n", progname);
+
} else
tm->nFH = tm->tmpFH;
* Close the GZ file since we dup'd. This will flush the buffers.
*/
if (AH->compression != 0)
- GZCLOSE(th->zFH);
+ if (GZCLOSE(th->zFH) != 0)
+ die_horribly(AH, "%s: could not close tar member\n", progname);
if (th->mode == 'w')
_tarAddFile(AH, th); /* This will close the temp file */
else
res = fwrite(buf, 1, len, th->nFH);
+ if (res != len)
+ die_horribly(th->AH, "%s: could not write to tar member (%d != %d)\n", progname, res, len);
+
th->pos += res;
return res;
}
{
lclTocEntry* tctx = (lclTocEntry*)AH->currToc->formatData;
- tarWrite((void*)data, dLen, tctx->TH);
-
- /* GZWRITE((void*)data, 1, dLen, tctx->TH->FH); */
+ dLen = tarWrite((void*)data, dLen, tctx->TH);
return dLen;
}
/* Add a block of NULLs since it's de rigueur. */
for(i=0; i<512; i++)
{
- fputc(0, ctx->tarFH);
+ if (fputc(0, ctx->tarFH) == EOF)
+ die_horribly(AH, "%s: could not write null block at end of TAR archive.\n", progname);
}
}
char buf[32768];
int cnt;
int len = 0;
+ int res;
int i, pad;
/*
while ( (cnt = fread(&buf[0], 1, 32767, tmp)) > 0)
{
- fwrite(&buf[0], 1, cnt, th->tarFH);
- len += cnt;
+ res = fwrite(&buf[0], 1, cnt, th->tarFH);
+ if (res != cnt)
+ die_horribly(AH, "%s: write error appending to TAR archive (%d != %d).\n", progname, res, cnt);
+ len += res;
}
- fclose(tmp); /* This *should* delete it... */
+ if (fclose(tmp) != 0) /* This *should* delete it... */
+ die_horribly(AH, "%s: Could not close tar member (fclose failed).\n", progname);
if (len != th->fileLen)
- die_horribly(AH, "%s: Actual file length does not match expected (%d vs. %d)\n",
+ die_horribly(AH, "%s: Actual file length does not match expected (%d vs. %d).\n",
progname, len, th->pos);
pad = ((len + 511) & ~511) - len;
for (i=0 ; i < pad ; i++)
- fputc('\0',th->tarFH);
+ {
+ if (fputc('\0',th->tarFH) == EOF)
+ die_horribly(AH, "%s: Could not output padding at end of tar member.\n", progname);
+ }
ctx->tarFHpos += len + pad;
}
lastSum = sum;
}
- fwrite(h, 1, 512, th->tarFH);
+ if (fwrite(h, 1, 512, th->tarFH) != 512) {
+ die_horribly(th->AH, "%s: unable to write tar header\n", progname);
+ }
+
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.185 2001/01/06 20:57:26 petere Exp $
+ * $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.186 2001/01/12 04:32:07 pjw Exp $
*
* Modifications - 6/10/96 - dave@bensoft.com - version 1.13.dhb
*
* table with the current implementation, and (b) it's not clear how to restore
* a partial BLOB backup (given the current OID-based BLOB implementation).
*
+ * Modifications - 04-Jan-2001 - pjw@rhyme.com.au
+ *
+ * - Check ntuples == 1 for various SELECT statements.
+ * - Fix handling of --table=* (multiple tables never worked properly, AFAICT)
+ *
*-------------------------------------------------------------------------
*/
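The "ntuples == 1" item above reduces to the same guard after each catalog lookup; sketched here with res and query standing in for the locals used at each call site:

	res = PQexec(g_conn, query->data);
	if (!res || PQresultStatus(res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "SELECT failed. Explanation from backend: '%s'.\n",
				PQerrorMessage(g_conn));
		exit_nicely(g_conn);
	}

	/* Sanity: the lookup must match exactly one row */
	if (PQntuples(res) != 1)
	{
		fprintf(stderr, "SELECT returned %d tuples. Expected 1.\n",
				PQntuples(res));
		exit_nicely(g_conn);
	}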
" -s, --schema-only dump out only the schema, no data\n"
" -S, --superuser=NAME specify the superuser user name to use in plain\n"
" text format\n"
- " -t, --table=TABLE dump for this table only\n"
+ " -t, --table=TABLE dump for this table only (* for all)\n"
" -u, --password use password authentication\n"
" -v, --verbose verbose\n"
" -x, --no-acl do not dump ACL's (grant/revoke)\n"
" -s dump out only the schema, no data\n"
" -S NAME specify the superuser user name to use in plain\n"
" text format\n"
- " -t TABLE dump for this table only\n"
+ " -t TABLE dump for this table only (* for all)\n"
" -u use password authentication\n"
" -v verbose\n"
" -x do not dump ACL's (grant/revoke)\n"
char copyBuf[512];
char *copyStmt;
- if (onlytable == NULL)
+ if (onlytable == NULL || (strlen(onlytable) == 0) )
all_only = "all";
else
all_only = "only";
if (g_verbose)
fprintf(stderr, "%s preparing to dump out the contents of %s %d table%s/sequence%s %s\n",
g_comment_start, all_only,
- (onlytable == NULL) ? numTables : 1,
- (onlytable == NULL) ? "s" : "", (onlytable == NULL) ? "s" : "",
+ (onlytable == NULL || (strlen(onlytable) == 0)) ? numTables : 1,
+ (onlytable == NULL || (strlen(onlytable) == 0)) ? "s" : "",
+ (onlytable == NULL || (strlen(onlytable) == 0)) ? "s" : "",
g_comment_end);
/* Dump SEQUENCEs first (if dataOnly) */
{
if (!(tblinfo[i].sequence))
continue;
- if (!onlytable || (!strcmp(tblinfo[i].relname, onlytable)))
+ if (!onlytable || (strcmp(tblinfo[i].relname, onlytable) == 0) || (strlen(onlytable) == 0) )
{
if (g_verbose)
fprintf(stderr, "%s dumping out schema of sequence '%s' %s\n",
if (tblinfo[i].sequence)/* already dumped */
continue;
- if (!onlytable || (!strcmp(classname, onlytable)))
+ if (!onlytable || (strcmp(classname, onlytable) == 0) || (strlen(onlytable) == 0))
{
if (g_verbose)
fprintf(stderr, "%s preparing to dump out the contents of Table '%s' %s\n",
for (i = 0; tablename[i]; i++)
if (isupper((unsigned char) tablename[i]))
tablename[i] = tolower((unsigned char) tablename[i]);
+
+ /* '*' is a special case meaning ALL tables, but only if unquoted */
+ if (strcmp(tablename,"*") == 0)
+ tablename[0] = '\0';
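+ /* Downstream code treats an empty tablename as "no table filter", i.e. dump everything (hence the new strlen() == 0 tests). */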
+
}
}
break;
exit(1);
}
- if (outputBlobs && (tablename != NULL) )
+ if (outputBlobs && tablename != NULL && strlen(tablename) > 0 )
{
fprintf(stderr,
- "%s: BLOB output is not supported for a single table. Use a full dump instead.\n",
+ "%s: BLOB output is not supported for a single table. Use all tables or a full dump instead.\n",
progname);
exit(1);
}
if (findx == numFuncs)
{
PGresult *r;
+ int numFuncs;
/*
* the funcname is an oid which we use to find the
r = PQexec(g_conn, query->data);
if (!r || PQresultStatus(r) != PGRES_TUPLES_OK)
{
- fprintf(stderr, "getTables(): SELECT (funcname) failed. Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
+ fprintf(stderr, "getTables(): SELECT (funcname) failed for trigger %s. Explanation from backend: '%s'.\n",
+ PQgetvalue(res2, i2, i_tgname), PQerrorMessage(g_conn));
+ exit_nicely(g_conn);
+ }
+
+ /* Sanity: Check we got only one tuple */
+ numFuncs = PQntuples(r);
+ if (numFuncs != 1) {
+ fprintf(stderr, "getTables(): SELECT (funcname) for trigger %s returned %d tuples. Expected 1.\n",
+ PQgetvalue(res2, i2, i_tgname), numFuncs);
exit_nicely(g_conn);
}
+
tgfunc = strdup(PQgetvalue(r, 0, PQfnumber(r, "proname")));
PQclear(r);
}
if (PQgetvalue(res, j, i_atthasdef)[0] == 't')
{
PGresult *res2;
+ int numAttr;
if (g_verbose)
fprintf(stderr, "%s finding DEFAULT expression for attr: '%s' %s\n",
"Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
+
+ /* Sanity: Check we got only one tuple */
+ numAttr = PQntuples(res2);
+ if (numAttr != 1) {
+ fprintf(stderr, "getTableAttrs(): SELECT (for DEFAULT) for attr %s returned %d tuples. Expected 1.\n",
+ tblinfo[i].attnames[j], numAttr);
+ exit_nicely(g_conn);
+ }
+
tblinfo[i].adef_expr[j] = strdup(PQgetvalue(res2, 0, PQfnumber(res2, "adsrc")));
PQclear(res2);
}
char *reltypename;
/* First - dump SEQUENCEs */
- if (tablename)
+ if (tablename && strlen(tablename) > 0)
{
serialSeq = malloc(strlen(tablename) + strlen(serialSeqSuffix) + 1);
strcpy(serialSeq, tablename);
if (tblinfo[i].sequence)/* already dumped */
continue;
- if (!tablename || (!strcmp(tblinfo[i].relname, tablename)))
+ if (!tablename || (!strcmp(tblinfo[i].relname, tablename)) || (strlen(tablename) == 0))
{
resetPQExpBuffer(delq);
funcname = NULL;
else
{
+ int numFuncs;
/*
* the funcname is an oid which we use to find the name of the
"Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
+
+ /* Sanity: Check we got only one tuple */
+ numFuncs = PQntuples(res);
+ if (numFuncs != 1) {
+ fprintf(stderr, "dumpIndices(): SELECT (funcname) for index %s returned %d tuples. Expected 1.\n",
+ indinfo[i].indrelname, numFuncs);
+ exit_nicely(g_conn);
+ }
+
funcname = strdup(PQgetvalue(res, 0, PQfnumber(res, "proname")));
PQclear(res);
}
/* convert opclass oid(s) into names */
for (nclass = 0; nclass < INDEX_MAX_KEYS; nclass++)
{
+ int numRows;
+
indclass = atoi(indinfo[i].indclass[nclass]);
if (indclass == 0)
break;
"Explanation from backend: '%s'.\n", PQerrorMessage(g_conn));
exit_nicely(g_conn);
}
+
+ /* Sanity: Check we got only one tuple */
+ numRows = PQntuples(res);
+ if (numRows != 1) {
+ fprintf(stderr, "dumpIndices(): SELECT (classname) for index %s returned %d tuples. Expected 1.\n",
+ indinfo[i].indrelname, numRows);
+ exit_nicely(g_conn);
+ }
+
classname[nclass] = strdup(PQgetvalue(res, 0, PQfnumber(res, "opcname")));
PQclear(res);
}
}
}
- if (!tablename || (!strcmp(indinfo[i].indrelname, tablename)))
+ if (!tablename || (strcmp(indinfo[i].indrelname, tablename) == 0) || (strlen(tablename) == 0) )
{
/*
for (i = 0; i < numTables; i++)
{
- if (tablename && strcmp(tblinfo[i].relname, tablename))
+ if (tablename && (strcmp(tblinfo[i].relname, tablename) != 0) && (strlen(tablename) > 0) )
continue;
+
for (j = 0; j < tblinfo[i].ntrig; j++)
{
ArchiveEntry(fout, tblinfo[i].triggers[j].oid, tblinfo[i].triggers[j].tgname,
*/
for (t = 0; t < numTables; t++)
{
- if (tablename && strcmp(tblinfo[t].relname, tablename))
+ if (tablename && (strcmp(tblinfo[t].relname, tablename) != 0) && (strlen(tablename) > 0) )
continue;
/*