* Darko Prenosil <Darko.Prenosil@finteh.hr>
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
*
- * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.98 2010/06/15 20:29:01 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/dblink/dblink.c,v 1.99 2010/07/06 19:18:54 momjian Exp $
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
appendStringInfo(&buf, " AND ");
appendStringInfoString(&buf,
- quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
+ quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
if (tgt_pkattvals[i] != NULL)
appendStringInfo(&buf, " = %s",
appendStringInfo(&buf, " AND ");
appendStringInfo(&buf, "%s",
- quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
+ quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
val = tgt_pkattvals[i];
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
- * with NULLs for any dropped columns. Otherwise we have to deal with
- * two different tupdescs and everything's very confusing.
+ * with NULLs for any dropped columns. Otherwise we have to deal with two
+ * different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
appendStringInfoString(&buf, "NULL");
else
appendStringInfoString(&buf,
- quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
+ quote_ident_cstr(NameStr(tupdesc->attrs[i]->attname)));
}
appendStringInfo(&buf, " FROM %s WHERE ", relname);
appendStringInfo(&buf, " AND ");
appendStringInfoString(&buf,
- quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
+ quote_ident_cstr(NameStr(tupdesc->attrs[pkattnum]->attname)));
if (src_pkattvals[i] != NULL)
appendStringInfo(&buf, " = %s",
/* Validate attnums and convert to internal form */
for (i = 0; i < pknumatts_arg; i++)
{
- int pkattnum = pkattnums_arg->values[i];
- int lnum;
- int j;
+ int pkattnum = pkattnums_arg->values[i];
+ int lnum;
+ int j;
/* Can throw error immediately if out of range */
if (pkattnum <= 0 || pkattnum > natts)
/*
* This is a port of the Double Metaphone algorithm for use in PostgreSQL.
*
- * $PostgreSQL: pgsql/contrib/fuzzystrmatch/dmetaphone.c,v 1.14 2010/04/05 02:46:20 adunstan Exp $
+ * $PostgreSQL: pgsql/contrib/fuzzystrmatch/dmetaphone.c,v 1.15 2010/07/06 19:18:55 momjian Exp $
*
* Double Metaphone computes 2 "sounds like" strings - a primary and an
* alternate. In most cases they are the same, but for foreign names
current += 1;
break;
- case '\xc7': /* C with cedilla */
+ case '\xc7': /* C with cedilla */
MetaphAdd(primary, "S");
MetaphAdd(secondary, "S");
current += 1;
MetaphAdd(secondary, "N");
break;
- case '\xd1': /* N with tilde */
+ case '\xd1': /* N with tilde */
current += 1;
MetaphAdd(primary, "N");
MetaphAdd(secondary, "N");
/*
- * $PostgreSQL: pgsql/contrib/pg_archivecleanup/pg_archivecleanup.c,v 1.2 2010/06/17 17:31:27 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/pg_archivecleanup/pg_archivecleanup.c,v 1.3 2010/07/06 19:18:55 momjian Exp $
*
* pg_archivecleanup.c
*
char *archiveLocation; /* where to find the archive? */
char *restartWALFileName; /* the file from which we can restart restore */
char WALFilePath[MAXPGPATH]; /* the file path including archive */
-char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we want to
- * remain in archive */
+char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we
+ * want to remain in
+ * archive */
/* =====================================================================
/*
* Initialize allows customized commands into the archive cleanup program.
*
- * You may wish to add code to check for tape libraries, etc..
+ * You may wish to add code to check for tape libraries, etc.
*/
static void
Initialize(void)
{
/*
- * This code assumes that archiveLocation is a directory, so we use
- * stat to test if it's accessible.
+ * This code assumes that archiveLocation is a directory, so we use stat
+ * to test if it's accessible.
*/
struct stat stat_buf;
while ((xlde = readdir(xldir)) != NULL)
{
/*
- * We ignore the timeline part of the XLOG segment identifiers
- * in deciding whether a segment is still needed. This
- * ensures that we won't prematurely remove a segment from a
- * parent timeline. We could probably be a little more
- * proactive about removing segments of non-parent timelines,
- * but that would be a whole lot more complicated.
+ * We ignore the timeline part of the XLOG segment identifiers in
+ * deciding whether a segment is still needed. This ensures that
+ * we won't prematurely remove a segment from a parent timeline.
+ * We could probably be a little more proactive about removing
+ * segments of non-parent timelines, but that would be a whole lot
+ * more complicated.
*
- * We use the alphanumeric sorting property of the filenames
- * to decide which ones are earlier than the
- * exclusiveCleanupFileName file. Note that this means files
- * are not removed in the order they were originally written,
- * in case this worries you.
+ * We use the alphanumeric sorting property of the filenames to
+ * decide which ones are earlier than the exclusiveCleanupFileName
+ * file. Note that this means files are not removed in the order
+ * they were originally written, in case this worries you.
*/
if (strlen(xlde->d_name) == XLOG_DATA_FNAME_LEN &&
- strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
- strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
+ strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
+ strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0)
{
#ifdef WIN32
snprintf(WALFilePath, MAXPGPATH, "%s\\%s", archiveLocation, xlde->d_name);
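As a standalone illustration of the test above (hypothetical file names, not code from pg_archivecleanup): the first 8 hex digits of a segment name are the timeline ID, so comparing from offset 8 onward ignores the timeline while the remaining 16 digits preserve WAL ordering.

#include <stdio.h>
#include <string.h>

#define XLOG_DATA_FNAME_LEN 24		/* 8 timeline + 8 log + 8 seg hex digits */

int
main(void)
{
	const char *cutoff = "000000010000000000000010";	/* oldest file to keep */
	const char *names[] = {
		"00000001000000000000000F",	/* older segment: removed */
		"000000020000000000000005",	/* other timeline, still older: removed */
		"000000010000000000000010",	/* the cutoff itself: kept */
		"00000001000000000000001A"	/* newer segment: kept */
	};
	int			i;

	for (i = 0; i < 4; i++)
	{
		int			removable =
		strlen(names[i]) == XLOG_DATA_FNAME_LEN &&
		strspn(names[i], "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
		strcmp(names[i] + 8, cutoff + 8) < 0;	/* skip the timeline digits */

		printf("%s -> %s\n", names[i], removable ? "remove" : "keep");
	}
	return 0;
}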
static void
SetWALFileNameForCleanup(void)
{
- bool fnameOK = false;
+ bool fnameOK = false;
/*
- * If restartWALFileName is a WAL file name then just use it directly.
- * If restartWALFileName is a .backup filename, make sure we use
- * the prefix of the filename, otherwise we will remove wrong files
- * since 000000010000000000000010.00000020.backup is after
+ * If restartWALFileName is a WAL file name then just use it directly. If
+ * restartWALFileName is a .backup filename, make sure we use the prefix
+ * of the filename, otherwise we will remove the wrong files since
+ * 000000010000000000000010.00000020.backup is after
* 000000010000000000000010.
*/
if (strlen(restartWALFileName) == XLOG_DATA_FNAME_LEN &&
}
else if (strlen(restartWALFileName) == XLOG_BACKUP_FNAME_LEN)
{
- int args;
+ int args;
uint32 tli = 1,
log = 0,
seg = 0,
offset = 0;
+
args = sscanf(restartWALFileName, "%08X%08X%08X.%08X.backup", &tli, &log, &seg, &offset);
if (args == 4)
{
fnameOK = true;
+
/*
- * Use just the prefix of the filename, ignore everything after first period
+ * Use just the prefix of the filename, ignore everything after
+ * first period
*/
XLogFileName(exclusiveCleanupFileName, tli, log, seg);
}
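A minimal sketch of the .backup handling described above (hypothetical program; the "%08X%08X%08X" re-assembly stands in for XLogFileName()): only the TLI/log/seg prefix of the backup file name becomes the cleanup cutoff.

#include <stdio.h>

int
main(void)
{
	const char *backup = "000000010000000000000010.00000020.backup";
	unsigned int tli,
				log,
				seg,
				offset;
	char		cutoff[64];

	if (sscanf(backup, "%08X%08X%08X.%08X.backup", &tli, &log, &seg, &offset) == 4)
	{
		snprintf(cutoff, sizeof(cutoff), "%08X%08X%08X", tli, log, seg);
		printf("cleanup cutoff: %s\n", cutoff);	/* 000000010000000000000010 */
	}
	return 0;
}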
printf("Usage:\n");
printf(" %s [OPTION]... ARCHIVELOCATION OLDESTKEPTWALFILE\n", progname);
printf("\n"
- "for use as an archive_cleanup_command in the recovery.conf when standby_mode = on:\n"
+ "for use as an archive_cleanup_command in the recovery.conf when standby_mode = on:\n"
" archive_cleanup_command = 'pg_archivecleanup [OPTION]... ARCHIVELOCATION %%r'\n"
"e.g.\n"
" archive_cleanup_command = 'pg_archivecleanup /mnt/server/archiverdir %%r'\n");
printf("\n"
- "or for use as a standalone archive cleaner:\n"
+ "or for use as a standalone archive cleaner:\n"
"e.g.\n"
" pg_archivecleanup /mnt/server/archiverdir 000000010000000000000010.00000020.backup\n");
printf("\nOptions:\n");
/*
* We will go to the archiveLocation to check restartWALFileName.
- * restartWALFileName may not exist anymore, which would not be an error, so
- * we separate the archiveLocation and restartWALFileName so we can check
- * separately whether archiveLocation exists, if not that is an error
+ * restartWALFileName may not exist anymore, which would not be an error,
+ * so we separate the archiveLocation and restartWALFileName so we can
+ * check separately whether archiveLocation exists; if not, that is an
+ * error
*/
if (optind < argc)
{
* server checks and output routines
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/check.c,v 1.10 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/check.c,v 1.11 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
{
prep_status(ctx, "Adjusting sequences");
exec_prog(ctx, true,
- SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
- "--username \"%s\" -f \"%s\" --dbname template1 >> \"%s\""
+ SYSTEMQUOTE "\"%s/psql\" --set ON_ERROR_STOP=on --port %d "
+ "--username \"%s\" -f \"%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user,
sequence_script_file_name, ctx->logfile);
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(ctx->new.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
- PG_MAJORVERSION);
+ PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
*/
void
create_script_for_old_cluster_deletion(migratorContext *ctx,
- char **deletion_script_file_name)
+ char **deletion_script_file_name)
{
FILE *script = NULL;
int tblnum;
if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
- *deletion_script_file_name);
+ *deletion_script_file_name);
#ifndef WIN32
/* add shebang header */
}
}
else
+
/*
* Simply delete the tablespace directory, which might be ".old"
* or a version-specific subdirectory.
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
pg_log(ctx, PG_FATAL, "Could not add execute permission to file: %s\n",
- *deletion_script_file_name);
+ *deletion_script_file_name);
#endif
check_ok(ctx);
* controldata functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/controldata.c,v 1.8 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/controldata.c,v 1.9 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
fputs(bufin, ctx->debug_fd);
#ifdef WIN32
+
/*
* Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
* work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
* dump functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/dump.c,v 1.6 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/dump.c,v 1.7 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --username \"%s\" "
"--schema-only --binary-upgrade > \"%s/" ALL_DUMP_FILE "\""
- SYSTEMQUOTE, ctx->new.bindir, ctx->old.port, ctx->user, ctx->cwd);
+ SYSTEMQUOTE, ctx->new.bindir, ctx->old.port, ctx->user, ctx->cwd);
check_ok(ctx);
}
* execution functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/exec.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/exec.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
static void checkBinDir(migratorContext *ctx, ClusterInfo *cluster);
-static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName);
+static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName);
static const char *validate_exec(const char *path);
static int check_data_dir(migratorContext *ctx, const char *pg_data);
return (fail) ? -1 : 0;
}
-
-
* file system operations
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/file.c,v 1.12 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/file.c,v 1.13 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
*/
int
pg_scandir(migratorContext *ctx, const char *dirname,
- struct dirent ***namelist,
+ struct dirent *** namelist,
int (*selector) (const struct dirent *))
{
#ifndef HAVE_SCANDIR
/*
* scandir() is originally from BSD 4.3, which had the third argument as
* non-const. Linux and other C libraries have updated it to use a const.
- * http://unix.derkeiler.com/Mailing-Lists/FreeBSD/questions/2005-12/msg00214.html
+ * http://unix.derkeiler.com/Mailing-Lists/FreeBSD/questions/2005-12/msg00214.html
*
* Here we try to guess which libc's need const, and which don't. The net
* goal here is to try to suppress a compiler warning due to a prototype
* mismatch of const usage. Ideally we would do this via autoconf, but
- * autoconf doesn't have a suitable builtin test and it seems overkill
- * to add one just to avoid a warning.
+ * autoconf doesn't have a suitable builtin test and it seems overkill to
+ * add one just to avoid a warning.
*/
#elif defined(__FreeBSD__) || defined(__bsdi__) || defined(__darwin__) || defined(__OpenBSD__)
/* no const */
* information support functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/info.c,v 1.10 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/info.c,v 1.11 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
Cluster whichCluster);
static void relarr_print(migratorContext *ctx, RelInfoArr *arr);
static void get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
- RelInfoArr *relarr, Cluster whichCluster);
+ RelInfoArr *relarr, Cluster whichCluster);
static void relarr_free(RelInfoArr *rel_arr);
static void map_rel(migratorContext *ctx, const RelInfo *oldrel,
- const RelInfo *newrel, const DbInfo *old_db,
- const DbInfo *new_db, const char *olddata,
- const char *newdata, FileNameMap *map);
+ const RelInfo *newrel, const DbInfo *old_db,
+ const DbInfo *new_db, const char *olddata,
+ const char *newdata, FileNameMap *map);
static void map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
const char *old_nspname, const char *old_relname,
const char *new_nspname, const char *new_relname,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static RelInfo *relarr_lookup_reloid(migratorContext *ctx,
- RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
+ RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
static RelInfo *relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
- const char *nspname, const char *relname,
- Cluster whichCluster);
+ const char *nspname, const char *relname,
+ Cluster whichCluster);
/*
int i_oid;
int i_spclocation;
- res = executeQueryOrDie(ctx, conn,
- "SELECT d.oid, d.datname, t.spclocation "
- "FROM pg_catalog.pg_database d "
- " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
- " ON d.dattablespace = t.oid "
- "WHERE d.datallowconn = true");
-
+ res = executeQueryOrDie(ctx, conn,
+ "SELECT d.oid, d.datname, t.spclocation "
+ "FROM pg_catalog.pg_database d "
+ " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+ " ON d.dattablespace = t.oid "
+ "WHERE d.datallowconn = true");
+
i_datname = PQfnumber(res, "datname");
i_oid = PQfnumber(res, "oid");
i_spclocation = PQfnumber(res, "spclocation");
for (relnum = 0; relnum < ntups; relnum++)
{
RelInfo *curr = &relinfos[num_rels++];
- const char *tblspace;
+ const char *tblspace;
curr->reloid = atol(PQgetvalue(res, relnum, i_oid));
* options functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/option.c,v 1.11 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/option.c,v 1.12 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
int option; /* Command line option */
int optindex = 0; /* used by getopt_long */
int user_id;
-
+
if (getenv("PGUSER"))
{
pg_free(ctx->user);
/* user lookup and 'root' test must be split because of usage() */
user_id = get_user_info(ctx, &ctx->user);
-
+
if (argc > 1)
{
if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
if ((*dirpath)[strlen(*dirpath) - 1] == '/')
#else
if ((*dirpath)[strlen(*dirpath) - 1] == '/' ||
- (*dirpath)[strlen(*dirpath) - 1] == '\\')
+ (*dirpath)[strlen(*dirpath) - 1] == '\\')
#endif
(*dirpath)[strlen(*dirpath) - 1] = 0;
}
* main source file
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.c,v 1.9 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.c,v 1.10 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
*/
prep_status(&ctx, "Setting next oid for new cluster");
exec_prog(&ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
- DEVNULL SYSTEMQUOTE,
+ DEVNULL SYSTEMQUOTE,
ctx.new.bindir, ctx.old.controldata.chkpnt_nxtoid, ctx.new.pgdata);
check_ok(&ctx);
check_ok(ctx);
/*
- * We do freeze after analyze so pg_statistic is also frozen.
- * template0 is not frozen here, but data rows were frozen by initdb,
- * and we set its datfrozenxid and relfrozenxids later to match the
- * new xid counter later.
+ * We do freeze after analyze so pg_statistic is also frozen. template0 is
+ * not frozen here, but data rows were frozen by initdb, and we set its
+ * datfrozenxid and relfrozenxids later to match the new xid counter
+ * later.
*/
prep_status(ctx, "Freezing all rows on the new cluster");
exec_prog(ctx, true,
prep_status(ctx, "Creating databases in the new cluster");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --port %d --username \"%s\" "
- "--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
+ "--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
GLOBALS_DUMP_FILE, ctx->logfile);
prep_status(ctx, "Restoring database schema to new cluster");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/psql\" --port %d --username \"%s\" "
- "--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
+ "--set ON_ERROR_STOP=on -f \"%s/%s\" --dbname template1 >> \"%s\""
SYSTEMQUOTE,
- ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
+ ctx->new.bindir, ctx->new.port, ctx->user, ctx->cwd,
DB_DUMP_FILE, ctx->logfile);
check_ok(ctx);
set_frozenxids(migratorContext *ctx)
{
int dbnum;
- PGconn *conn, *conn_template1;
+ PGconn *conn,
+ *conn_template1;
PGresult *dbres;
int ntups;
int i_datname;
ntups = PQntuples(dbres);
for (dbnum = 0; dbnum < ntups; dbnum++)
{
- char *datname = PQgetvalue(dbres, dbnum, i_datname);
- char *datallowconn= PQgetvalue(dbres, dbnum, i_datallowconn);
+ char *datname = PQgetvalue(dbres, dbnum, i_datname);
+ char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn);
/*
- * We must update databases where datallowconn = false, e.g.
- * template0, because autovacuum increments their datfrozenxids and
- * relfrozenxids even if autovacuum is turned off, and even though
- * all the data rows are already frozen To enable this, we
- * temporarily change datallowconn.
+ * We must update databases where datallowconn = false, e.g.
+ * template0, because autovacuum increments their datfrozenxids and
+ * relfrozenxids even if autovacuum is turned off, and even though all
+ * the data rows are already frozen. To enable this, we temporarily
+ * change datallowconn.
*/
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
- "UPDATE pg_catalog.pg_database "
- "SET datallowconn = true "
- "WHERE datname = '%s'", datname));
+ "UPDATE pg_catalog.pg_database "
+ "SET datallowconn = true "
+ "WHERE datname = '%s'", datname));
conn = connectToServer(ctx, datname, CLUSTER_NEW);
/* Reset datallowconn flag */
if (strcmp(datallowconn, "f") == 0)
PQclear(executeQueryOrDie(ctx, conn_template1,
- "UPDATE pg_catalog.pg_database "
- "SET datallowconn = false "
- "WHERE datname = '%s'", datname));
+ "UPDATE pg_catalog.pg_database "
+ "SET datallowconn = false "
+ "WHERE datname = '%s'", datname));
}
PQclear(dbres);
* pg_upgrade.h
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.h,v 1.14 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/pg_upgrade.h,v 1.15 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
#define MAX_STRING 1024
#define LINE_ALLOC 4096
#define QUERY_ALLOC 8192
-
+
#define MIGRATOR_API_VERSION 1
#define MESSAGE_WIDTH "60"
#define CLUSTERNAME(cluster) ((cluster) == CLUSTER_OLD ? "old" : "new")
/* OID system catalog preservation added during PG 9.0 development */
-#define TABLE_SPACE_SUBDIRS 201001111
+#define TABLE_SPACE_SUBDIRS 201001111
/*
* Each relation is represented by a relinfo structure.
*/
typedef struct
{
- char nspname[NAMEDATALEN]; /* namespace name */
- char relname[NAMEDATALEN]; /* relation name */
+ char nspname[NAMEDATALEN]; /* namespace name */
+ char relname[NAMEDATALEN]; /* relation name */
Oid reloid; /* relation oid */
Oid relfilenode; /* relation relfile node */
Oid toastrelid; /* oid of the toast relation */
- char tablespace[MAXPGPATH]; /* relations tablespace path */
+ char tablespace[MAXPGPATH]; /* relations tablespace path */
} RelInfo;
typedef struct
Oid new; /* Relfilenode of the new relation */
char old_file[MAXPGPATH];
char new_file[MAXPGPATH];
- char old_nspname[NAMEDATALEN]; /* old name of the namespace */
- char old_relname[NAMEDATALEN]; /* old name of the relation */
- char new_nspname[NAMEDATALEN]; /* new name of the namespace */
- char new_relname[NAMEDATALEN]; /* new name of the relation */
+ char old_nspname[NAMEDATALEN]; /* old name of the namespace */
+ char old_relname[NAMEDATALEN]; /* old name of the relation */
+ char new_nspname[NAMEDATALEN]; /* new name of the namespace */
+ char new_relname[NAMEDATALEN]; /* new name of the relation */
} FileNameMap;
/*
*/
typedef enum
{
- NONE = 0, /* used for no running servers */
+ NONE = 0, /* used for no running servers */
CLUSTER_OLD,
CLUSTER_NEW
} Cluster;
typedef struct
{
ControlData controldata; /* pg_control information */
- DbInfoArr dbarr; /* dbinfos array */
- char *pgdata; /* pathname for cluster's $PGDATA directory */
- char *bindir; /* pathname for cluster's executable directory */
- unsigned short port; /* port number where postmaster is waiting */
- uint32 major_version; /* PG_VERSION of cluster */
- char *major_version_str; /* string PG_VERSION of cluster */
- Oid pg_database_oid; /* OID of pg_database relation */
- char *libpath; /* pathname for cluster's pkglibdir */
- char *tablespace_suffix; /* directory specification */
+ DbInfoArr dbarr; /* dbinfos array */
+ char *pgdata; /* pathname for cluster's $PGDATA directory */
+ char *bindir; /* pathname for cluster's executable directory */
+ unsigned short port; /* port number where postmaster is waiting */
+ uint32 major_version; /* PG_VERSION of cluster */
+ char *major_version_str; /* string PG_VERSION of cluster */
+ Oid pg_database_oid; /* OID of pg_database relation */
+ char *libpath; /* pathname for cluster's pkglibdir */
+ char *tablespace_suffix; /* directory specification */
} ClusterInfo;
*/
typedef struct
{
- ClusterInfo old, new; /* old and new cluster information */
+ ClusterInfo old,
+ new; /* old and new cluster information */
const char *progname; /* complete pathname for this program */
char *exec_path; /* full path to my executable */
char *user; /* username for clusters */
- char cwd[MAXPGPATH]; /* current working directory, used for output */
+ char cwd[MAXPGPATH]; /* current working directory, used for output */
char **tablespaces; /* tablespaces */
int num_tablespaces;
char **libraries; /* loadable libraries */
* changes */
bool verbose; /* TRUE -> be verbose in messages */
bool debug; /* TRUE -> log more information */
- transferMode transfer_mode; /* copy files or link them? */
+ transferMode transfer_mode; /* copy files or link them? */
} migratorContext;
/*
* Global variables
*/
-extern char scandir_file_pattern[];
+extern char scandir_file_pattern[];
/* check.c */
void output_check_banner(migratorContext *ctx, bool *live_check);
-void check_old_cluster(migratorContext *ctx, bool live_check,
- char **sequence_script_file_name);
+void check_old_cluster(migratorContext *ctx, bool live_check,
+ char **sequence_script_file_name);
void check_new_cluster(migratorContext *ctx);
void report_clusters_compatible(migratorContext *ctx);
-void issue_warnings(migratorContext *ctx,
- char *sequence_script_file_name);
-void output_completion_banner(migratorContext *ctx,
- char *deletion_script_file_name);
+void issue_warnings(migratorContext *ctx,
+ char *sequence_script_file_name);
+void output_completion_banner(migratorContext *ctx,
+ char *deletion_script_file_name);
void check_cluster_versions(migratorContext *ctx);
void check_cluster_compatibility(migratorContext *ctx, bool live_check);
-void create_script_for_old_cluster_deletion(migratorContext *ctx,
- char **deletion_script_file_name);
+void create_script_for_old_cluster_deletion(migratorContext *ctx,
+ char **deletion_script_file_name);
/* controldata.c */
void get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check);
-void check_control_data(migratorContext *ctx, ControlData *oldctrl,
+void check_control_data(migratorContext *ctx, ControlData *oldctrl,
ControlData *newctrl);
/* exec.c */
-int exec_prog(migratorContext *ctx, bool throw_error,
- const char *cmd,...);
+int exec_prog(migratorContext *ctx, bool throw_error,
+ const char *cmd,...);
void verify_directories(migratorContext *ctx);
bool is_server_running(migratorContext *ctx, const char *datadir);
void rename_old_pg_control(migratorContext *ctx);
typedef struct
{
- uint16 oldPageVersion; /* Page layout version of the old
- * cluster */
- uint16 newPageVersion; /* Page layout version of the new
- * cluster */
+ uint16 oldPageVersion; /* Page layout version of the old cluster */
+ uint16 newPageVersion; /* Page layout version of the new cluster */
uint16 pluginVersion; /* API version of converter plugin */
- void *pluginData; /* Plugin data (set by plugin) */
- pluginStartup startup; /* Pointer to plugin's startup function */
- pluginConvertFile convertFile; /* Pointer to plugin's file converter
+ void *pluginData; /* Plugin data (set by plugin) */
+ pluginStartup startup; /* Pointer to plugin's startup function */
+ pluginConvertFile convertFile; /* Pointer to plugin's file converter
* function */
- pluginConvertPage convertPage; /* Pointer to plugin's page converter
+ pluginConvertPage convertPage; /* Pointer to plugin's page converter
* function */
pluginShutdown shutdown; /* Pointer to plugin's shutdown function */
} pageCnvCtx;
const char *setupPageConverter(migratorContext *ctx, pageCnvCtx **result);
-
#else
/* dummy */
typedef void *pageCnvCtx;
#endif
-int dir_matching_filenames(const struct dirent *scan_ent);
-int pg_scandir(migratorContext *ctx, const char *dirname,
- struct dirent ***namelist,
- int (*selector) (const struct dirent *));
+int dir_matching_filenames(const struct dirent * scan_ent);
+int pg_scandir(migratorContext *ctx, const char *dirname,
+ struct dirent *** namelist,
+ int (*selector) (const struct dirent *));
const char *copyAndUpdateFile(migratorContext *ctx,
pageCnvCtx *pageConverter, const char *src,
const char *dst, bool force);
/* function.c */
void install_support_functions(migratorContext *ctx);
-void uninstall_support_functions(migratorContext *ctx);
+void uninstall_support_functions(migratorContext *ctx);
void get_loadable_libraries(migratorContext *ctx);
void check_loadable_libraries(migratorContext *ctx);
/* info.c */
FileNameMap *gen_db_file_maps(migratorContext *ctx, DbInfo *old_db,
- DbInfo *new_db, int *nmaps, const char *old_pgdata,
- const char *new_pgdata);
-void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
- Cluster whichCluster);
+ DbInfo *new_db, int *nmaps, const char *old_pgdata,
+ const char *new_pgdata);
+void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
+ Cluster whichCluster);
DbInfo *dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name);
void dbarr_free(DbInfoArr *db_arr);
-void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
- const char *dbName);
+void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
+ const char *dbName);
/* option.c */
/* server.c */
-PGconn *connectToServer(migratorContext *ctx, const char *db_name,
+PGconn *connectToServer(migratorContext *ctx, const char *db_name,
Cluster whichCluster);
-PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
+PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
const char *fmt,...);
-void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
+void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
void stop_postmaster(migratorContext *ctx, bool fast, bool quiet);
-uint32 get_major_server_version(migratorContext *ctx, char **verstr,
- Cluster whichCluster);
+uint32 get_major_server_version(migratorContext *ctx, char **verstr,
+ Cluster whichCluster);
void check_for_libpq_envvars(migratorContext *ctx);
/* version.c */
-void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
- bool check_mode, Cluster whichCluster);
+void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
+ bool check_mode, Cluster whichCluster);
/* version_old_8_3.c */
-void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
- Cluster whichCluster);
-void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
- Cluster whichCluster);
-void old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
- Cluster whichCluster);
-void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
- bool check_mode, Cluster whichCluster);
-void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
- bool check_mode, Cluster whichCluster);
-void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
- bool check_mode, Cluster whichCluster);
-char *old_8_3_create_sequence_script(migratorContext *ctx,
- Cluster whichCluster);
+void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
+ Cluster whichCluster);
+void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
+ Cluster whichCluster);
+void old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
+ Cluster whichCluster);
+void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
+ bool check_mode, Cluster whichCluster);
+void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
+ bool check_mode, Cluster whichCluster);
+void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
+ bool check_mode, Cluster whichCluster);
+char *old_8_3_create_sequence_script(migratorContext *ctx,
+ Cluster whichCluster);
* relfilenode functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/relfilenode.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/relfilenode.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
const char *newnspname, const char *newrelname);
/* used by scandir(), must be global */
-char scandir_file_pattern[MAXPGPATH];
-
+char scandir_file_pattern[MAXPGPATH];
+
/*
* transfer_all_new_dbs()
*
* database server functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/server.c,v 1.7 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/server.c,v 1.8 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
}
/*
- * On Win32, we can't send both server output and pg_ctl output
- * to the same file because we get the error:
- * "The process cannot access the file because it is being used by another process."
- * so we have to send pg_ctl output to 'nul'.
- */
+ * On Win32, we can't send both server output and pg_ctl output to the
+ * same file because we get the error: "The process cannot access the file
+ * because it is being used by another process." so we have to send pg_ctl
+ * output to 'nul'.
+ */
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
"-o \"-p %d -c autovacuum=off "
/* See comment in start_postmaster() about why win32 output is ignored. */
snprintf(cmd, sizeof(cmd),
- SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> "
- "\"%s\" 2>&1" SYSTEMQUOTE,
- bindir, ctx->logfile, datadir, fast ? "-m fast" : "",
+ SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> "
+ "\"%s\" 2>&1" SYSTEMQUOTE,
+ bindir, ctx->logfile, datadir, fast ? "-m fast" : "",
#ifndef WIN32
- ctx->logfile);
+ ctx->logfile);
#else
- DEVNULL);
+ DEVNULL);
#endif
exec_prog(ctx, fast ? false : true, "%s", cmd);
* tablespace functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/tablespace.c,v 1.5 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/tablespace.c,v 1.6 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
static void get_tablespace_paths(migratorContext *ctx);
static void set_tablespace_directory_suffix(migratorContext *ctx,
- Cluster whichCluster);
+ Cluster whichCluster);
void
if ((ctx->num_tablespaces = PQntuples(res)) != 0)
ctx->tablespaces = (char **) pg_malloc(ctx,
- ctx->num_tablespaces * sizeof(char *));
+ ctx->num_tablespaces * sizeof(char *));
else
ctx->tablespaces = NULL;
* utility functions
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade/util.c,v 1.4 2010/07/03 16:33:14 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade/util.c,v 1.5 2010/07/06 19:18:55 momjian Exp $
*/
#include "pg_upgrade.h"
int
get_user_info(migratorContext *ctx, char **user_name)
{
- int user_id;
-
+ int user_id;
+
#ifndef WIN32
struct passwd *pw = getpwuid(geteuid());
* to control oid and relfilenode assignment
*
* Copyright (c) 2010, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/contrib/pg_upgrade_support/pg_upgrade_support.c,v 1.4 2010/07/03 16:33:15 momjian Exp $
+ * $PostgreSQL: pgsql/contrib/pg_upgrade_support/pg_upgrade_support.c,v 1.5 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
* not be compiling against PG 9.0.
*/
extern void EnumValuesCreate(Oid enumTypeOid, List *vals,
- Oid binary_upgrade_next_pg_enum_oid);
+ Oid binary_upgrade_next_pg_enum_oid);
#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
extern PGDLLIMPORT Oid binary_upgrade_next_toast_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_index_relfilenode;
-Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
-Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
-Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
-Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
-Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
-Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
-Datum add_pg_enum_label(PG_FUNCTION_ARGS);
+Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
+Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
+Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
+Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
+Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
+Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
+Datum add_pg_enum_label(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(set_next_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_array_oid);
{
Oid enumoid = PG_GETARG_OID(0);
Oid typoid = PG_GETARG_OID(1);
- Name label = PG_GETARG_NAME(2);
-
+ Name label = PG_GETARG_NAME(2);
+
EnumValuesCreate(typoid, list_make1(makeString(NameStr(*label))),
enumoid);
PG_RETURN_VOID();
}
-
* A simple benchmark program for PostgreSQL
* Originally written by Tatsuo Ishii and enhanced by many contributors.
*
- * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.98 2010/03/23 01:29:22 itagaki Exp $
+ * $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.99 2010/07/06 19:18:55 momjian Exp $
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
* ALL RIGHTS RESERVED;
*
TState *thread = (TState *) arg;
CState *state = thread->state;
TResult *result;
- FILE *logfile = NULL; /* per-thread log file */
+ FILE *logfile = NULL; /* per-thread log file */
instr_time start,
end;
int nstate = thread->nstate;
goto done;
}
- FD_SET (sock, &input_mask);
+ FD_SET(sock, &input_mask);
if (maxsock < sock)
maxsock = sock;
/*
* Set a different random seed in each child process. Otherwise they all
- * inherit the parent's state and generate the same "random" sequence.
- * (In the threaded case, the different threads will obtain subsets of the
+ * inherit the parent's state and generate the same "random" sequence. (In
+ * the threaded case, the different threads will obtain subsets of the
* output of a single random() sequence, which should be okay for our
* purposes.)
*/
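A minimal sketch of that idea, under the assumption that mixing the clock with the child's PID is enough to diverge the sequences (hypothetical helper, not pgbench's actual seeding code):

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>

/* Hypothetical helper: give each forked child its own random sequence. */
static void
seed_child_random(void)
{
	struct timeval tv;

	gettimeofday(&tv, NULL);
	srandom((unsigned int) tv.tv_usec + ((unsigned int) getpid() << 16));
}

int
main(void)
{
	seed_child_random();
	printf("first random value: %ld\n", random());
	return 0;
}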
void *(*routine) (void *);
void *arg;
void *result;
-} win32_pthread;
+} win32_pthread;
static unsigned __stdcall
win32_pthread_run(void *arg)
*
* $From: sha2.c,v 1.1 2001/11/08 00:01:51 adg Exp adg $
*
- * $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.12 2010/04/02 15:21:20 mha Exp $
+ * $PostgreSQL: pgsql/contrib/pgcrypto/sha2.c,v 1.13 2010/07/06 19:18:55 momjian Exp $
*/
#include "postgres.h"
*
* NOTE: The naming of R and S appears backwards here (R is a SHIFT and
* S is a ROTATION) because the SHA-256/384/512 description document
- * (see http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf)
+ * (see http://www.iwar.org.uk/comsec/resources/cipher/sha256-384-512.pdf)
* uses this same "backwards" definition.
*/
/* Shift-right (used in SHA-256, SHA-384, and SHA-512): */
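To make the "backwards" naming concrete, an illustrative pair of macros along those lines (a sketch only; see sha2.c for the real definitions): R merely shifts, S rotates, so a bit pushed past the low end reappears at the top only with S.

#include <stdio.h>
#include <stdint.h>

#define R(b, x)		((x) >> (b))							/* shift-right: vacated bits become zero */
#define S32(b, x)	(((x) >> (b)) | ((x) << (32 - (b))))	/* 32-bit rotate-right: low bits wrap to the top */

int
main(void)
{
	uint32_t	v = 0x80000001u;

	printf("R(1,v)   = %08X\n", (unsigned int) R(1, v));	/* 40000000: low bit lost */
	printf("S32(1,v) = %08X\n", (unsigned int) S32(1, v));	/* C0000000: low bit wraps around */
	return 0;
}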
/*
- * $PostgreSQL: pgsql/contrib/xml2/xpath.c,v 1.29 2010/03/03 19:10:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/xml2/xpath.c,v 1.30 2010/07/06 19:18:55 momjian Exp $
*
* Parser interface for DOM-based parser (libxml) rather than
* stream-based SAX-type parser
}
/*
- * Setup the parser. This should happen after we are done evaluating
- * the query, in case it calls functions that set up libxml differently.
+ * Setup the parser. This should happen after we are done evaluating the
+ * query, in case it calls functions that set up libxml differently.
*/
pgxml_parser_init();
/*
- * $PostgreSQL: pgsql/contrib/xml2/xslt_proc.c,v 1.20 2010/03/03 19:10:22 tgl Exp $
+ * $PostgreSQL: pgsql/contrib/xml2/xslt_proc.c,v 1.21 2010/07/06 19:18:55 momjian Exp $
*
* XSLT processing functions (requiring libxslt)
*
#include <libxslt/xsltInternals.h>
#include <libxslt/transform.h>
#include <libxslt/xsltutils.h>
-
-#endif /* USE_LIBXSLT */
+#endif /* USE_LIBXSLT */
/* externally accessible functions */
static void parse_params(const char **params, text *paramstr);
#define MAXPARAMS 20 /* must be even, see parse_params() */
-
-#endif /* USE_LIBXSLT */
+#endif /* USE_LIBXSLT */
PG_FUNCTION_INFO_V1(xslt_process);
PG_RETURN_NULL();
PG_RETURN_TEXT_P(cstring_to_text_with_len((char *) resstr, reslen));
-
-#else /* !USE_LIBXSLT */
+#else /* !USE_LIBXSLT */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("xslt_process() is not available without libxslt")));
PG_RETURN_NULL();
-
-#endif /* USE_LIBXSLT */
+#endif /* USE_LIBXSLT */
}
#ifdef USE_LIBXSLT
params[i] = NULL;
}
-#endif /* USE_LIBXSLT */
+#endif /* USE_LIBXSLT */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.291 2010/05/02 22:37:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.292 2010/07/06 19:18:55 momjian Exp $
*
*
* INTERFACE ROUTINES
/*
* We're about to remove tuples. In Hot Standby mode, ensure that there's
* no queries running for which the removed tuples are still visible.
- *
- * Not all HEAP2_CLEAN records remove tuples with xids, so we only want
- * to conflict on the records that cause MVCC failures for user queries.
- * If latestRemovedXid is invalid, skip conflict processing.
+ *
+ * Not all HEAP2_CLEAN records remove tuples with xids, so we only want to
+ * conflict on the records that cause MVCC failures for user queries. If
+ * latestRemovedXid is invalid, skip conflict processing.
*/
if (InHotStandby && TransactionIdIsValid(xlrec->latestRemovedXid))
ResolveRecoveryConflictWithSnapshot(xlrec->latestRemovedXid,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.24 2010/04/22 02:15:45 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/pruneheap.c,v 1.25 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
{
- TransactionId ignore = InvalidTransactionId; /* return value not needed */
+ TransactionId ignore = InvalidTransactionId; /* return value not
+ * needed */
/* OK to prune */
(void) heap_page_prune(relation, buffer, OldestXmin, true, &ignore);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.122 2010/03/28 09:27:01 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.123 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
*/
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
- OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
+ OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
void
_bt_delitems_delete(Relation rel, Buffer buf,
- OffsetNumber *itemnos, int nitems, Relation heapRel)
+ OffsetNumber *itemnos, int nitems, Relation heapRel)
{
Page page = BufferGetPage(buf);
BTPageOpaque opaque;
rdata[0].next = &(rdata[1]);
/*
- * We need the target-offsets array whether or not we store the
- * to allow us to find the latestRemovedXid on a standby server.
+ * We need the target-offsets array whether or not we store the whole
+ * buffer, to allow us to find the latestRemovedXid on a standby server.
*/
rdata[1].data = (char *) itemnos;
rdata[1].len = nitems * sizeof(OffsetNumber);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.68 2010/04/30 06:34:29 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.69 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
btree_xlog_delete_get_latestRemovedXid(XLogRecord *record)
{
xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
- OffsetNumber *unused;
- Buffer ibuffer, hbuffer;
- Page ipage, hpage;
- ItemId iitemid, hitemid;
- IndexTuple itup;
+ OffsetNumber *unused;
+ Buffer ibuffer,
+ hbuffer;
+ Page ipage,
+ hpage;
+ ItemId iitemid,
+ hitemid;
+ IndexTuple itup;
HeapTupleHeader htuphdr;
- BlockNumber hblkno;
- OffsetNumber hoffnum;
- TransactionId latestRemovedXid = InvalidTransactionId;
- TransactionId htupxid = InvalidTransactionId;
- int i;
+ BlockNumber hblkno;
+ OffsetNumber hoffnum;
+ TransactionId latestRemovedXid = InvalidTransactionId;
+ TransactionId htupxid = InvalidTransactionId;
+ int i;
/*
- * If there's nothing running on the standby we don't need to derive
- * a full latestRemovedXid value, so use a fast path out of here.
- * That returns InvalidTransactionId, and so will conflict with
- * users, but since we just worked out that's zero people, its OK.
+ * If there's nothing running on the standby we don't need to derive a
+ * full latestRemovedXid value, so use a fast path out of here. That
+ * returns InvalidTransactionId, and so will conflict with users, but
+ * since we just worked out that's zero people, it's OK.
*/
if (CountDBBackends(InvalidOid) == 0)
return latestRemovedXid;
ipage = (Page) BufferGetPage(ibuffer);
/*
- * Loop through the deleted index items to obtain the TransactionId
- * from the heap items they point to.
+ * Loop through the deleted index items to obtain the TransactionId from
+ * the heap items they point to.
*/
unused = (OffsetNumber *) ((char *) xlrec + SizeOfBtreeDelete);
hpage = (Page) BufferGetPage(hbuffer);
/*
- * Look up the heap tuple header that the index tuple points at
- * by using the heap node supplied with the xlrec. We can't use
+ * Look up the heap tuple header that the index tuple points at by
+ * using the heap node supplied with the xlrec. We can't use
* heap_fetch, since it uses ReadBuffer rather than XLogReadBuffer.
* Note that we are not looking at tuple data here, just headers.
*/
htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
/*
- * Get the heap tuple's xmin/xmax and ratchet up the latestRemovedXid.
- * No need to consider xvac values here.
+ * Get the heap tuple's xmin/xmax and ratchet up the
+ * latestRemovedXid. No need to consider xvac values here.
*/
htupxid = HeapTupleHeaderGetXmin(htuphdr);
if (TransactionIdFollows(htupxid, latestRemovedXid))
/*
* Conjecture: if hitemid is dead then it had xids before the xids
* marked on LP_NORMAL items. So we just ignore this item and move
- * onto the next, for the purposes of calculating latestRemovedxids.
+ * onto the next, for the purposes of calculating
+ * latestRemovedXid.
*/
}
else
UnlockReleaseBuffer(ibuffer);
/*
- * Note that if all heap tuples were LP_DEAD then we will be
- * returning InvalidTransactionId here. That can happen if we are
- * re-replaying this record type, though that will be before the
- * consistency point and will not cause problems. It should
- * happen very rarely after the consistency point, though note
- * that we can't tell the difference between this and the fast
- * path exit above. May need to change that in future.
+ * Note that if all heap tuples were LP_DEAD then we will be returning
+ * InvalidTransactionId here. That can happen if we are re-replaying this
+ * record type, though that will be before the consistency point and will
+ * not cause problems. It should happen very rarely after the consistency
+ * point, though note that we can't tell the difference between this and
+ * the fast path exit above. May need to change that in future.
*/
return latestRemovedXid;
}
switch (info)
{
case XLOG_BTREE_DELETE:
+
/*
* Btree delete records can conflict with standby queries. You
* might think that vacuum records would conflict as well, but
break;
case XLOG_BTREE_REUSE_PAGE:
+
/*
* Btree reuse page records exist to provide a conflict point
* when we reuse pages in the index via the FSM. That's all it
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
appendStringInfo(buf, "delete: index %u/%u/%u; iblk %u, heap %u/%u/%u;",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
xlrec->block,
xlrec->hnode.spcNode, xlrec->hnode.dbNode, xlrec->hnode.relNode);
break;
appendStringInfo(buf, "reuse_page: rel %u/%u/%u; latestRemovedXid %u",
xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->latestRemovedXid);
+ xlrec->node.relNode, xlrec->latestRemovedXid);
break;
}
default:
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.61 2010/04/28 00:09:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.62 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
Assert(TransactionIdIsValid(xid));
if (max_prepared_xacts <= 0)
- return false; /* nothing to do */
+ return false; /* nothing to do */
/* Read and validate file */
buf = ReadTwoPhaseFile(xid, false);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.292 2010/06/29 18:44:58 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.293 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Asynchronous commit case:
*
* This enables possible committed transaction loss in the case of a
- * postmaster crash because WAL buffers are left unwritten.
- * Ideally we could issue the WAL write without the fsync, but
- * some wal_sync_methods do not allow separate write/fsync.
+ * postmaster crash because WAL buffers are left unwritten. Ideally we
+ * could issue the WAL write without the fsync, but some
+ * wal_sync_methods do not allow separate write/fsync.
*
* Report the latest async commit LSN, so that the WAL writer knows to
* flush this commit.
/*
* Report the latest async abort LSN, so that the WAL writer knows to
- * flush this abort. There's nothing to be gained by delaying this,
- * since WALWriter may as well do this when it can. This is important
- * with streaming replication because if we don't flush WAL regularly
- * we will find that large aborts leave us with a long backlog for
- * when commits occur after the abort, increasing our window of data
- * loss should problems occur at that point.
+ * flush this abort. There's nothing to be gained by delaying this, since
+ * WALWriter may as well do this when it can. This is important with
+ * streaming replication because if we don't flush WAL regularly we will
+ * find that large aborts leave us with a long backlog for when commits
+ * occur after the abort, increasing our window of data loss should
+ * problems occur at that point.
*/
if (!isSubXact)
XLogSetAsyncCommitLSN(XactLastRecEnd);
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.429 2010/07/03 22:15:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.430 2010/07/06 19:18:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
TransactionId ckptXid;
XLogRecPtr asyncCommitLSN; /* LSN of newest async commit */
- uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */
+ uint32 lastRemovedLog; /* latest removed/recycled XLOG segment */
uint32 lastRemovedSeg;
/* Protected by WALWriteLock: */
int XLogCacheBlck; /* highest allocated xlog buffer index */
TimeLineID ThisTimeLineID;
TimeLineID RecoveryTargetTLI;
+
/*
* archiveCleanupCommand is read from recovery.conf but needs to be in
* shared memory so that the bgwriter process can access it.
static uint32 readSeg = 0;
static uint32 readOff = 0;
static uint32 readLen = 0;
-static int readSource = 0; /* XLOG_FROM_* code */
+static int readSource = 0; /* XLOG_FROM_* code */
/*
* Keeps track of which sources we've tried to read the current WAL
* record from and failed.
*/
-static int failedSources = 0; /* OR of XLOG_FROM_* codes */
+static int failedSources = 0; /* OR of XLOG_FROM_* codes */
/*
* These variables track when we last obtained some WAL data to process,
* to process right now.)
*/
static TimestampTz XLogReceiptTime = 0;
-static int XLogReceiptSource = 0; /* XLOG_FROM_* code */
+static int XLogReceiptSource = 0; /* XLOG_FROM_* code */
/* Buffer for currently read page (XLOG_BLCKSZ bytes) */
static char *readBuf = NULL;
int sources);
static bool XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
bool randAccess);
-static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr);
+static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr);
static void XLogFileClose(void);
static bool RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize);
}
/*
- * If already known flushed, we're done. Just need to check if we
- * are holding an open file handle to a logfile that's no longer
- * in use, preventing the file from being deleted.
+ * If already known flushed, we're done. Just need to check if we are
+ * holding an open file handle to a logfile that's no longer in use,
+ * preventing the file from being deleted.
*/
if (XLByteLE(WriteRqstPtr, LogwrtResult.Flush))
{
- if (openLogFile >= 0) {
+ if (openLogFile >= 0)
+ {
if (!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
XLogFileClose();
XLogFileName(xlogfname, endTLI, endLogId, endLogSeg);
/*
- * Write comment to history file to explain why and where timeline changed.
- * Comment varies according to the recovery target used.
+ * Write comment to history file to explain why and where timeline
+ * changed. Comment varies according to the recovery target used.
*/
if (recoveryTarget == RECOVERY_TARGET_XID)
snprintf(buffer, sizeof(buffer),
*key_p = *value_p = NULL;
/*
- * Allocate the buffer on first use. It's used to hold both the
- * parameter name and value.
+ * Allocate the buffer on first use. It's used to hold both the parameter
+ * name and value.
*/
if (buf == NULL)
buf = malloc(MAXPGPATH + 1);
}
}
else if (*ptr == '\0')
- return false; /* unterminated quoted string */
+ return false; /* unterminated quoted string */
else
*(bufp++) = *ptr;
GetXLogReceiptTime(TimestampTz *rtime, bool *fromStream)
{
/*
- * This must be executed in the startup process, since we don't export
- * the relevant state to shared memory.
+ * This must be executed in the startup process, since we don't export the
+ * relevant state to shared memory.
*/
Assert(InRecovery);
CheckRequiredParameterValues(void)
{
/*
- * For archive recovery, the WAL must be generated with at least
- * 'archive' wal_level.
+ * For archive recovery, the WAL must be generated with at least 'archive'
+ * wal_level.
*/
if (InArchiveRecovery && ControlFile->wal_level == WAL_LEVEL_MINIMAL)
{
}
/*
- * For Hot Standby, the WAL must be generated with 'hot_standby' mode,
- * and we must have at least as many backend slots as the primary.
+ * For Hot Standby, the WAL must be generated with 'hot_standby' mode, and
+ * we must have at least as many backend slots as the primary.
*/
if (InArchiveRecovery && EnableHotStandby)
{
ControlFile->checkPointCopy.ThisTimeLineID)));
/*
- * Save the selected recovery target timeline ID and archive_cleanup_command
- * in shared memory so that other processes can see them
+ * Save the selected recovery target timeline ID and
+ * archive_cleanup_command in shared memory so that other processes can
+ * see them
*/
XLogCtl->RecoveryTargetTLI = recoveryTargetTLI;
strncpy(XLogCtl->archiveCleanupCommand,
(errmsg("entering standby mode")));
else if (recoveryTarget == RECOVERY_TARGET_XID)
ereport(LOG,
- (errmsg("starting point-in-time recovery to XID %u",
- recoveryTargetXid)));
+ (errmsg("starting point-in-time recovery to XID %u",
+ recoveryTargetXid)));
else if (recoveryTarget == RECOVERY_TARGET_TIME)
ereport(LOG,
(errmsg("starting point-in-time recovery to %s",
if (InRecovery)
{
int rmid;
+
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
CheckRequiredParameterValues();
/*
- * Initialize for Hot Standby, if enabled. We won't let backends
- * in yet, not until we've reached the min recovery point specified in
+ * Initialize for Hot Standby, if enabled. We won't let backends in
+ * yet, not until we've reached the min recovery point specified in
* control file and we've established a recovery snapshot from a
* running-xacts WAL record.
*/
/*
* If we're beginning at a shutdown checkpoint, we know that
- * nothing was running on the master at this point. So fake-up
- * an empty running-xacts record and use that here and now.
- * Recover additional standby state for prepared transactions.
+ * nothing was running on the master at this point. So fake up an
+ * empty running-xacts record and use that here and now. Recover
+ * additional standby state for prepared transactions.
*/
if (wasShutdown)
{
TransactionId latestCompletedXid;
/*
- * Construct a RunningTransactions snapshot representing a shut
- * down server, with only prepared transactions still alive.
- * We're never overflowed at this point because all subxids
- * are listed with their parent prepared transactions.
+ * Construct a RunningTransactions snapshot representing a
+ * shut down server, with only prepared transactions still
+ * alive. We're never overflowed at this point because all
+ * subxids are listed with their parent prepared transactions.
*/
running.xcnt = nxids;
running.subxid_overflow = false;
* recoveryLastXTime.
*
* This is slightly confusing if we're starting from an online
- * checkpoint; we've just read and replayed the chekpoint record,
- * but we're going to start replay from its redo pointer, which
- * precedes the location of the checkpoint record itself. So even
- * though the last record we've replayed is indeed ReadRecPtr, we
- * haven't replayed all the preceding records yet. That's OK for
- * the current use of these variables.
+ * checkpoint; we've just read and replayed the checkpoint record, but
+ * we're going to start replay from its redo pointer, which precedes
+ * the location of the checkpoint record itself. So even though the
+ * last record we've replayed is indeed ReadRecPtr, we haven't
+ * replayed all the preceding records yet. That's OK for the current
+ * use of these variables.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->replayEndRecPtr = ReadRecPtr;
XLogReceiptTime = GetCurrentTimestamp();
/*
- * Let postmaster know we've started redo now, so that it can
- * launch bgwriter to perform restartpoints. We don't bother
- * during crash recovery as restartpoints can only be performed
- * during archive recovery. And we'd like to keep crash recovery
- * simple, to avoid introducing bugs that could affect you when
- * recovering after crash.
+ * Let postmaster know we've started redo now, so that it can launch
+ * bgwriter to perform restartpoints. We don't bother during crash
+ * recovery as restartpoints can only be performed during archive
+ * recovery. And we'd like to keep crash recovery simple, to avoid
+ * introducing bugs that could affect you when recovering after crash.
*
* After this point, we can no longer assume that we're the only
* process in addition to postmaster! Also, fsync requests are
}
/*
- * Allow read-only connections immediately if we're consistent already.
+ * Allow read-only connections immediately if we're consistent
+ * already.
*/
CheckRecoveryConsistency();
xlogctl->replayEndRecPtr = EndRecPtr;
SpinLockRelease(&xlogctl->info_lck);
- /* If we are attempting to enter Hot Standby mode, process XIDs we see */
+ /*
+ * If we are attempting to enter Hot Standby mode, process
+ * XIDs we see
+ */
if (standbyState >= STANDBY_INITIALIZED &&
TransactionIdIsValid(record->xl_xid))
RecordKnownAssignedTransactionIds(record->xl_xid);
static void
CheckRecoveryConsistency(void)
{
- static bool backendsAllowed = false;
+ static bool backendsAllowed = false;
/*
* Have we passed our safe starting point?
}
/*
- * Have we got a valid starting snapshot that will allow
- * queries to be run? If so, we can tell postmaster that the
- * database is consistent now, enabling connections.
+ * Have we got a valid starting snapshot that will allow queries to be
+ * run? If so, we can tell postmaster that the database is consistent now,
+ * enabling connections.
*/
if (standbyState == STANDBY_SNAPSHOT_READY &&
!backendsAllowed &&
{
/*
* Calculate the last segment that we need to retain because of
- * wal_keep_segments, by subtracting wal_keep_segments from the
- * new checkpoint location.
+ * wal_keep_segments, by subtracting wal_keep_segments from the new
+ * checkpoint location.
*/
if (wal_keep_segments > 0)
{
CheckPoint lastCheckPoint;
uint32 _logId;
uint32 _logSeg;
- TimestampTz xtime;
+ TimestampTz xtime;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl;
* restartpoint, we can't perform a new restart point. We still update
* minRecoveryPoint in that case, so that if this is a shutdown restart
* point, we won't start up earlier than before. That's not strictly
- * necessary, but when hot standby is enabled, it would be rather
- * weird if the database opened up for read-only connections at a
- * point-in-time before the last shutdown. Such time travel is still
- * possible in case of immediate shutdown, though.
+ * necessary, but when hot standby is enabled, it would be rather weird if
+ * the database opened up for read-only connections at a point-in-time
+ * before the last shutdown. Such time travel is still possible in case of
+ * immediate shutdown, though.
*
* We don't explicitly advance minRecoveryPoint when we do create a
* restartpoint. It's assumed that flushing the buffers will do that as a
}
/*
- * Update the shared RedoRecPtr so that the startup process can
- * calculate the number of segments replayed since last restartpoint,
- * and request a restartpoint if it exceeds checkpoint_segments.
+ * Update the shared RedoRecPtr so that the startup process can calculate
+ * the number of segments replayed since last restartpoint, and request a
+ * restartpoint if it exceeds checkpoint_segments.
*
* You need to hold WALInsertLock and info_lck to update it, although
* during recovery acquiring WALInsertLock is just pro forma, because
ereport((log_checkpoints ? LOG : DEBUG2),
(errmsg("recovery restart point at %X/%X",
lastCheckPoint.redo.xlogid, lastCheckPoint.redo.xrecoff),
- xtime ? errdetail("last completed transaction was at log time %s",
- timestamptz_to_str(xtime)) : 0));
+ xtime ? errdetail("last completed transaction was at log time %s",
+ timestamptz_to_str(xtime)) : 0));
LWLockRelease(CheckpointLock);
max_locks_per_xact != ControlFile->max_locks_per_xact)
{
/*
- * The change in number of backend slots doesn't need to be
- * WAL-logged if archiving is not enabled, as you can't start
- * archive recovery with wal_level=minimal anyway. We don't
- * really care about the values in pg_control either if
- * wal_level=minimal, but seems better to keep them up-to-date
- * to avoid confusion.
+ * The change in number of backend slots doesn't need to be WAL-logged
+ * if archiving is not enabled, as you can't start archive recovery
+ * with wal_level=minimal anyway. We don't really care about the
+ * values in pg_control either if wal_level=minimal, but seems better
+ * to keep them up-to-date to avoid confusion.
*/
if (wal_level != ControlFile->wal_level || XLogIsNeeded())
{
SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
/*
- * If we see a shutdown checkpoint while waiting for an
- * end-of-backup record, the backup was cancelled and the
- * end-of-backup record will never arrive.
+ * If we see a shutdown checkpoint while waiting for an end-of-backup
+ * record, the backup was cancelled and the end-of-backup record will
+ * never arrive.
*/
if (InArchiveRecovery &&
!XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
(errmsg("online backup was cancelled, recovery cannot continue")));
/*
- * If we see a shutdown checkpoint, we know that nothing was
- * running on the master at this point. So fake-up an empty
- * running-xacts record and use that here and now. Recover
- * additional standby state for prepared transactions.
+ * If we see a shutdown checkpoint, we know that nothing was running
+ * on the master at this point. So fake-up an empty running-xacts
+ * record and use that here and now. Recover additional standby state
+ * for prepared transactions.
*/
if (standbyState >= STANDBY_INITIALIZED)
{
/*
* Construct a RunningTransactions snapshot representing a shut
- * down server, with only prepared transactions still alive.
- * We're never overflowed at this point because all subxids
- * are listed with their parent prepared transactions.
+ * down server, with only prepared transactions still alive. We're
+ * never overflowed at this point because all subxids are listed
+ * with their parent prepared transactions.
*/
running.xcnt = nxids;
running.subxid_overflow = false;
ControlFile->max_prepared_xacts = xlrec.max_prepared_xacts;
ControlFile->max_locks_per_xact = xlrec.max_locks_per_xact;
ControlFile->wal_level = xlrec.wal_level;
+
/*
- * Update minRecoveryPoint to ensure that if recovery is aborted,
- * we recover back up to this point before allowing hot standby
- * again. This is particularly important if wal_level was set to
- * 'archive' before, and is now 'hot_standby', to ensure you don't
- * run queries against the WAL preceding the wal_level change.
- * Same applies to decreasing max_* settings.
+ * Update minRecoveryPoint to ensure that if recovery is aborted, we
+ * recover back up to this point before allowing hot standby again.
+ * This is particularly important if wal_level was set to 'archive'
+ * before, and is now 'hot_standby', to ensure you don't run queries
+ * against the WAL preceding the wal_level change. Same applies to
+ * decreasing max_* settings.
*/
minRecoveryPoint = ControlFile->minRecoveryPoint;
if ((minRecoveryPoint.xlogid != 0 || minRecoveryPoint.xrecoff != 0)
if (!XLogIsNeeded())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL level not sufficient for making an online backup"),
+ errmsg("WAL level not sufficient for making an online backup"),
errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
backupidstr = text_to_cstring(backupid);
if (!XLogIsNeeded())
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("WAL level not sufficient for making an online backup"),
+ errmsg("WAL level not sufficient for making an online backup"),
errhint("wal_level must be set to \"archive\" or \"hot_standby\" at server start.")));
/*
/*
* If archiving is enabled, wait for all the required WAL files to be
- * archived before returning. If archiving isn't enabled, the required
- * WAL needs to be transported via streaming replication (hopefully
- * with wal_keep_segments set high enough), or some more exotic
- * mechanism like polling and copying files from pg_xlog with script.
- * We have no knowledge of those mechanisms, so it's up to the user to
- * ensure that he gets all the required WAL.
+ * archived before returning. If archiving isn't enabled, the required WAL
+ * needs to be transported via streaming replication (hopefully with
+ * wal_keep_segments set high enough), or some more exotic mechanism like
+ * polling and copying files from pg_xlog with script. We have no
+ * knowledge of those mechanisms, so it's up to the user to ensure that he
+ * gets all the required WAL.
*
* We wait until both the last WAL file filled during backup and the
- * history file have been archived, and assume that the alphabetic
- * sorting property of the WAL files ensures any earlier WAL files are
- * safely archived as well.
+ * history file have been archived, and assume that the alphabetic sorting
+ * property of the WAL files ensures any earlier WAL files are safely
+ * archived as well.
*
* We wait forever, since archive_command is supposed to work and we
* assume the admin wanted his backup to work completely. If you don't
*/
if (XLogArchivingActive())
{
- XLByteToPrevSeg(stoppoint, _logId, _logSeg);
- XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
-
- XLByteToSeg(startpoint, _logId, _logSeg);
- BackupHistoryFileName(histfilename, ThisTimeLineID, _logId, _logSeg,
- startpoint.xrecoff % XLogSegSize);
+ XLByteToPrevSeg(stoppoint, _logId, _logSeg);
+ XLogFileName(lastxlogfilename, ThisTimeLineID, _logId, _logSeg);
- seconds_before_warning = 60;
- waits = 0;
+ XLByteToSeg(startpoint, _logId, _logSeg);
+ BackupHistoryFileName(histfilename, ThisTimeLineID, _logId, _logSeg,
+ startpoint.xrecoff % XLogSegSize);
- while (XLogArchiveIsBusy(lastxlogfilename) ||
- XLogArchiveIsBusy(histfilename))
- {
- CHECK_FOR_INTERRUPTS();
+ seconds_before_warning = 60;
+ waits = 0;
- if (!reported_waiting && waits > 5)
+ while (XLogArchiveIsBusy(lastxlogfilename) ||
+ XLogArchiveIsBusy(histfilename))
{
- ereport(NOTICE,
- (errmsg("pg_stop_backup cleanup done, waiting for required WAL segments to be archived")));
- reported_waiting = true;
- }
+ CHECK_FOR_INTERRUPTS();
- pg_usleep(1000000L);
+ if (!reported_waiting && waits > 5)
+ {
+ ereport(NOTICE,
+ (errmsg("pg_stop_backup cleanup done, waiting for required WAL segments to be archived")));
+ reported_waiting = true;
+ }
- if (++waits >= seconds_before_warning)
- {
- seconds_before_warning *= 2; /* This wraps in >10 years... */
- ereport(WARNING,
- (errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
- waits),
- errhint("Check that your archive_command is executing properly. "
- "pg_stop_backup can be cancelled safely, "
- "but the database backup will not be usable without all the WAL segments.")));
+ pg_usleep(1000000L);
+
+ if (++waits >= seconds_before_warning)
+ {
+ seconds_before_warning *= 2; /* This wraps in >10 years... */
+ ereport(WARNING,
+ (errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
+ waits),
+ errhint("Check that your archive_command is executing properly. "
+ "pg_stop_backup can be cancelled safely, "
+ "but the database backup will not be usable without all the WAL segments.")));
+ }
}
- }
- ereport(NOTICE,
- (errmsg("pg_stop_backup complete, all required WAL segments have been archived")));
+ ereport(NOTICE,
+ (errmsg("pg_stop_backup complete, all required WAL segments have been archived")));
}
else
ereport(NOTICE,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
- errhint("pg_xlogfile_name() cannot be executed during recovery.")));
+ errhint("pg_xlogfile_name() cannot be executed during recovery.")));
locationstr = text_to_cstring(location);
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
/*
- * Signal bgwriter to start a restartpoint if we've replayed too
- * much xlog since the last one.
+ * Signal bgwriter to start a restartpoint if we've replayed too much
+ * xlog since the last one.
*/
if (StandbyMode && bgwriterLaunched)
{
{
if (WalRcvInProgress())
{
- bool havedata;
+ bool havedata;
/*
* If we find an invalid record in the WAL streamed from
* master, something is seriously wrong. There's little
- * chance that the problem will just go away, but PANIC
- * is not good for availability either, especially in
- * hot standby mode. Disconnect, and retry from
- * archive/pg_xlog again. The WAL in the archive should
- * be identical to what was streamed, so it's unlikely
- * that it helps, but one can hope...
+ * chance that the problem will just go away, but PANIC is
+ * not good for availability either, especially in hot
+ * standby mode. Disconnect, and retry from
+ * archive/pg_xlog again. The WAL in the archive should be
+ * identical to what was streamed, so it's unlikely that
+ * it helps, but one can hope...
*/
if (failedSources & XLOG_FROM_STREAM)
{
* WAL from walreceiver and observe that we had already
* processed everything before the most recent "chunk"
* that it flushed to disk. In steady state where we are
- * keeping up with the incoming data, XLogReceiptTime
- * will be updated on each cycle. When we are behind,
+ * keeping up with the incoming data, XLogReceiptTime will
+ * be updated on each cycle. When we are behind,
* XLogReceiptTime will not advance, so the grace time
* allotted to conflicting queries will decrease.
*/
}
else
{
- int sources;
- pg_time_t now;
+ int sources;
+ pg_time_t now;
/*
* Until walreceiver manages to reconnect, poll the
/*
* If primary_conninfo is set, launch walreceiver to
- * try to stream the missing WAL, before retrying
- * to restore from archive/pg_xlog.
+ * try to stream the missing WAL, before retrying to
+ * restore from archive/pg_xlog.
*
* If fetching_ckpt is TRUE, RecPtr points to the
* initial checkpoint location. In that case, we use
- * RedoStartLSN as the streaming start position instead
- * of RecPtr, so that when we later jump backwards to
- * start redo at RedoStartLSN, we will have the logs
- * streamed already.
+ * RedoStartLSN as the streaming start position
+ * instead of RecPtr, so that when we later jump
+ * backwards to start redo at RedoStartLSN, we will
+ * have the logs streamed already.
*/
if (PrimaryConnInfo)
{
RequestXLogStreaming(
- fetching_ckpt ? RedoStartLSN : *RecPtr,
- PrimaryConnInfo);
+ fetching_ckpt ? RedoStartLSN : *RecPtr,
+ PrimaryConnInfo);
continue;
}
}
failedSources |= sources;
/*
- * Check to see if the trigger file exists. Note that
- * we do this only after failure, so when you create
- * the trigger file, we still finish replaying as much
- * as we can from archive and pg_xlog before failover.
+ * Check to see if the trigger file exists. Note that we
+ * do this only after failure, so when you create the
+ * trigger file, we still finish replaying as much as we
+ * can from archive and pg_xlog before failover.
*/
if (CheckForStandbyTrigger())
goto triggered;
/* In archive or crash recovery. */
if (readFile < 0)
{
- int sources;
+ int sources;
/* Reset curFileTLI if random fetch. */
if (randAccess)
}
/*
- * At this point, we have the right segment open and if we're streaming
- * we know the requested record is in it.
+ * At this point, we have the right segment open and if we're streaming we
+ * know the requested record is in it.
*/
Assert(readFile != -1);
* in the current WAL page, previously read by XLogPageRead().
*
* 'emode' is the error mode that would be used to report a file-not-found
- * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
+ * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
* we're retrying the exact same record that we've tried previously, only
- * complain the first time to keep the noise down. However, we only do when
+ * complain the first time to keep the noise down. However, we only do that when
* reading from pg_xlog, because we don't expect any invalid records in archive
* or in records streamed from master. Files in the archive should be complete,
* and we should never hit the end of WAL because we stop and wait for more WAL
- * to arrive before replaying it.
+ * to arrive before replaying it.
*
* NOTE: This function remembers the RecPtr value it was last called with,
* to suppress repeated messages about the same record. Only call this when
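
The xlog.c comment earlier in these hunks about wal_keep_segments describes subtracting a segment count from the new checkpoint location to find the oldest segment that must be retained. A minimal standalone sketch of that arithmetic follows; keep_log_seg and SEGS_PER_LOGID are illustrative names, not the backend's own XLByteToSeg/XLogFileName machinery, and the clamp at segment zero is an assumption of the sketch.

/*
 * Sketch: subtract wal_keep_segments from a WAL position expressed as a
 * (log id, segment) pair, assuming a fixed number of segments per log id.
 */
#include <stdio.h>
#include <stdint.h>

#define SEGS_PER_LOGID 255          /* stand-in for XLogSegsPerFile */

static void
keep_log_seg(uint32_t *logId, uint32_t *logSeg, unsigned keep_segments)
{
    uint64_t linear = (uint64_t) *logId * SEGS_PER_LOGID + *logSeg;

    if (linear <= keep_segments)
        linear = 0;                 /* don't go below the first segment */
    else
        linear -= keep_segments;

    *logId = (uint32_t) (linear / SEGS_PER_LOGID);
    *logSeg = (uint32_t) (linear % SEGS_PER_LOGID);
}

int
main(void)
{
    uint32_t logId = 3, logSeg = 2;

    keep_log_seg(&logId, &logSeg, 10);      /* e.g. wal_keep_segments = 10 */
    printf("oldest segment to keep: %08X/%08X\n",
           (unsigned) logId, (unsigned) logSeg);
    return 0;
}

Representing the position as a single 64-bit segment number makes the borrow across the log-id boundary trivial.
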
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.167 2010/06/13 17:43:12 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.168 2010/07/06 19:18:55 momjian Exp $
*
* NOTES
* See acl.h.
if (is_grant)
{
if (this_privileges == 0)
- {
+ {
if (objkind == ACL_KIND_COLUMN && colname)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
else
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
- errmsg("not all privileges could be revoked for \"%s\"",
- objname)));
+ errmsg("not all privileges could be revoked for \"%s\"",
+ objname)));
}
}
/*
* The default for a global entry is the hard-wired default ACL for the
- * particular object type. The default for non-global entries is an empty
+ * particular object type. The default for non-global entries is an empty
* ACL. This must be so because global entries replace the hard-wired
* defaults, while others are added on.
*/
/*
* If the result is the same as the default value, we do not need an
- * explicit pg_default_acl entry, and should in fact remove the entry
- * if it exists. Must sort both arrays to compare properly.
+ * explicit pg_default_acl entry, and should in fact remove the entry if
+ * it exists. Must sort both arrays to compare properly.
*/
aclitemsort(new_acl);
aclitemsort(def_acl);
if (OidIsValid(iacls->nspid))
{
ObjectAddress myself,
- referenced;
+ referenced;
myself.classId = DefaultAclRelationId;
myself.objectId = HeapTupleGetOid(newtuple);
case ACLCHECK_NO_PRIV:
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied for column \"%s\" of relation \"%s\"",
- colname, objectname)));
+ errmsg("permission denied for column \"%s\" of relation \"%s\"",
+ colname, objectname)));
break;
case ACLCHECK_NOT_OWNER:
/* relation msg is OK since columns don't have separate owners */
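
The aclchk.c hunk above notes that a freshly computed default ACL only needs a catalog entry if it differs from the hard-wired default, and that both arrays must be sorted before comparing. A rough sketch of that sort-then-compare idea, using plain int arrays in place of aclitem arrays (int_cmp and same_as_default are invented names):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

static int
int_cmp(const void *a, const void *b)
{
    int     ia = *(const int *) a;
    int     ib = *(const int *) b;

    return (ia > ib) - (ia < ib);
}

static bool
same_as_default(int *acl, int nacl, int *def, int ndef)
{
    if (nacl != ndef)
        return false;
    /* sort both arrays so element-wise comparison is meaningful */
    qsort(acl, nacl, sizeof(int), int_cmp);
    qsort(def, ndef, sizeof(int), int_cmp);
    return memcmp(acl, def, nacl * sizeof(int)) == 0;
}

int
main(void)
{
    int     new_acl[] = {3, 1, 2};
    int     def_acl[] = {2, 3, 1};

    printf("redundant entry? %s\n",
           same_as_default(new_acl, 3, def_acl, 3) ? "yes" : "no");
    return 0;
}
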
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.175 2010/05/11 04:52:28 itagaki Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.176 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Set per-function configuration parameters */
set_items = (ArrayType *) DatumGetPointer(proconfig);
- if (set_items) /* Need a new GUC nesting level */
+ if (set_items) /* Need a new GUC nesting level */
{
save_nestlevel = NewGUCNestLevel();
ProcessGUCArray(set_items,
GUC_ACTION_SAVE);
}
else
- save_nestlevel = 0; /* keep compiler quiet */
+ save_nestlevel = 0; /* keep compiler quiet */
OidFunctionCall1(languageValidator, ObjectIdGetDatum(retval));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.42 2010/07/03 13:53:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.43 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* and then insert or delete from pg_shdepend as appropriate.
*
* Note that we can't just insert all referenced roles blindly during GRANT,
- * because we would end up with duplicate registered dependencies. We could
+ * because we would end up with duplicate registered dependencies. We could
* check for existence of the tuples before inserting, but that seems to be
- * more expensive than what we are doing here. Likewise we can't just delete
+ * more expensive than what we are doing here. Likewise we can't just delete
* blindly during REVOKE, because the user may still have other privileges.
* It is also possible that REVOKE actually adds dependencies, due to
* instantiation of a formerly implicit default ACL (although at present,
*
* NOTE: Both input arrays must be sorted and de-duped. (Typically they
* are extracted from an ACL array by aclmembers(), which takes care of
- * both requirements.) The arrays are pfreed before return.
+ * both requirements.) The arrays are pfreed before return.
*/
void
updateAclDependencies(Oid classId, Oid objectId, int32 objsubId,
int i;
/*
- * Remove entries that are common to both lists; those represent
- * existing dependencies we don't need to change.
+ * Remove entries that are common to both lists; those represent existing
+ * dependencies we don't need to change.
*
* OK to overwrite the inputs since we'll pfree them anyway.
*/
continue;
shdepDropDependency(sdepRel, classId, objectId, objsubId,
- false, /* exact match on objsubId */
+ false, /* exact match on objsubId */
AuthIdRelationId, roleid,
SHARED_DEPENDENCY_ACL);
}
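
updateAclDependencies, touched above, expects both role lists to be sorted and de-duped so that entries common to the old and new ACL can be dropped before any pg_shdepend rows are added or removed. One way to strip the common members with a single merge-style pass is sketched here; remove_common and the int arrays are illustrative stand-ins, not the function's actual code.

#include <stdio.h>

static void
remove_common(int *a, int *na, int *b, int *nb)
{
    int     ia = 0, ib = 0;
    int     oa = 0, ob = 0;

    while (ia < *na && ib < *nb)
    {
        if (a[ia] == b[ib])
        {
            ia++;                   /* common member: drop from both lists */
            ib++;
        }
        else if (a[ia] < b[ib])
            a[oa++] = a[ia++];
        else
            b[ob++] = b[ib++];
    }
    while (ia < *na)
        a[oa++] = a[ia++];
    while (ib < *nb)
        b[ob++] = b[ib++];
    *na = oa;
    *nb = ob;
}

int
main(void)
{
    int     oldmembers[] = {10, 20, 30};
    int     newmembers[] = {20, 30, 40};
    int     nold = 3, nnew = 3;
    int     i;

    remove_common(oldmembers, &nold, newmembers, &nnew);
    for (i = 0; i < nold; i++)
        printf("drop dependency on role %d\n", oldmembers[i]);
    for (i = 0; i < nnew; i++)
        printf("add dependency on role %d\n", newmembers[i]);
    return 0;
}
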
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.197 2010/06/01 00:33:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.198 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* First run the expression through the planner. This has a couple of
- * important consequences. First, function default arguments will get
+ * important consequences. First, function default arguments will get
* inserted, which may affect volatility (consider "default now()").
* Second, inline-able functions will get inlined, which may allow us to
- * conclude that the function is really less volatile than it's marked.
- * As an example, polymorphic functions must be marked with the most
- * volatile behavior that they have for any input type, but once we
- * inline the function we may be able to conclude that it's not so
- * volatile for the particular input type we're dealing with.
+ * conclude that the function is really less volatile than it's marked. As
+ * an example, polymorphic functions must be marked with the most volatile
+ * behavior that they have for any input type, but once we inline the
+ * function we may be able to conclude that it's not so volatile for the
+ * particular input type we're dealing with.
*
* We assume here that expression_planner() won't scribble on its input.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.67 2010/07/03 13:53:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.68 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
AlterOpClassOwner_oid(Oid opclassOid, Oid newOwnerId)
{
- HeapTuple tup;
- Relation rel;
+ HeapTuple tup;
+ Relation rel;
rel = heap_open(OperatorClassRelationId, RowExclusiveLock);
void
AlterOpFamilyOwner_oid(Oid opfamilyOid, Oid newOwnerId)
{
- HeapTuple tup;
- Relation rel;
+ HeapTuple tup;
+ Relation rel;
rel = heap_open(OperatorFamilyRelationId, RowExclusiveLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.46 2010/06/22 11:36:16 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.47 2010/07/06 19:18:56 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
oprNamespace = QualifiedNameGetCreationNamespace(names, &oprName);
/*
- * The SQL standard committee has decided that => should be used for
- * named parameters; therefore, a future release of PostgreSQL may
- * disallow it as the name of a user-defined operator.
+ * The SQL standard committee has decided that => should be used for named
+ * parameters; therefore, a future release of PostgreSQL may disallow it
+ * as the name of a user-defined operator.
*/
if (strcmp(oprName, "=>") == 0)
ereport(WARNING,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.331 2010/07/01 14:10:21 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.332 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Renaming the columns of sequences or toast tables doesn't actually
* break anything from the system's point of view, since internal
- * references are by attnum. But it doesn't seem right to allow users
- * to change names that are hardcoded into the system, hence the following
+ * references are by attnum. But it doesn't seem right to allow users to
+ * change names that are hardcoded into the system, hence the following
* restriction.
*/
relkind = RelationGetForm(targetrelation)->relkind;
relkind != RELKIND_INDEX)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("\"%s\" is not a table, view, composite type or index",
- RelationGetRelationName(targetrelation))));
+ errmsg("\"%s\" is not a table, view, composite type or index",
+ RelationGetRelationName(targetrelation))));
/*
* permissions checking. only the owner of a class can change its schema.
for (blkno = 0; blkno < nblocks; blkno++)
{
- /* If we got a cancel signal during the copy of the data, quit */
- CHECK_FOR_INTERRUPTS();
-
+ /* If we got a cancel signal during the copy of the data, quit */
+ CHECK_FOR_INTERRUPTS();
+
smgrread(src, forkNum, blkno, buf);
/* XLOG stuff */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.75 2010/07/02 02:44:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.76 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(errcode(ERRCODE_UNDEFINED_FILE),
errmsg("directory \"%s\" does not exist", location),
InRecovery ? errhint("Create directory \"%s\" for this tablespace before "
- "restarting the server.", location) : 0));
+ "restarting the server.", location) : 0));
else
ereport(ERROR,
(errcode_for_file_access(),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.135 2010/04/22 02:15:45 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.136 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
vacrelstats);
/* Remove tuples from heap */
lazy_vacuum_heap(onerel, vacrelstats);
+
/*
* Forget the now-vacuumed tuples, and press on, but be careful
- * not to reset latestRemovedXid since we want that value to be valid.
+ * not to reset latestRemovedXid since we want that value to be
+ * valid.
*/
vacrelstats->num_dead_tuples = 0;
vacrelstats->num_index_scans++;
* We count tuples removed by the pruning step as removed by VACUUM.
*/
tups_vacuumed += heap_page_prune(onerel, buf, OldestXmin, false,
- &vacrelstats->latestRemovedXid);
+ &vacrelstats->latestRemovedXid);
+
/*
* Now scan the page to collect vacuumable items and check for tuples
* requiring freezing.
{
/* Remove tuples from heap */
lazy_vacuum_page(onerel, blkno, buf, 0, vacrelstats);
+
/*
* Forget the now-vacuumed tuples, and press on, but be careful
- * not to reset latestRemovedXid since we want that value to be valid.
+ * not to reset latestRemovedXid since we want that value to be
+ * valid.
*/
vacrelstats->num_dead_tuples = 0;
vacuumed_pages++;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.172 2010/05/29 02:32:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.173 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We should have found our tuple in the index, unless we exited the loop
- * early because of conflict. Complain if not. If we ever implement
- * '<>' index opclasses, this check will fail and will have to be removed.
+ * early because of conflict. Complain if not. If we ever implement '<>'
+ * index opclasses, this check will fail and will have to be removed.
*/
if (!found_self && !conflict)
ereport(ERROR,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.143 2010/03/19 22:54:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.144 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
{
/*
- * Assume we failed during init_sql_fcache(). (It's possible that
- * the function actually has an empty body, but in that case we may
- * as well report all errors as being "during startup".)
+ * Assume we failed during init_sql_fcache(). (It's possible that the
+ * function actually has an empty body, but in that case we may as
+ * well report all errors as being "during startup".)
*/
errcontext("SQL function \"%s\" during startup", fcache->fname);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.102 2010/05/28 01:14:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.103 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* input, since we assume mergejoin operators are strict. If the NULL
* is in the first join column, and that column sorts nulls last, then
* we can further conclude that no following tuple can match anything
- * either, since they must all have nulls in the first column. However,
+ * either, since they must all have nulls in the first column. However,
* that case is only interesting if we're not in FillOuter mode, else
* we have to visit all the tuples anyway.
*
switch (MJEvalInnerValues(node, innerTupleSlot))
{
case MJEVAL_MATCHABLE:
+
/*
* OK, we have the initial tuples. Begin by skipping
* non-matching tuples.
switch (MJEvalInnerValues(node, innerTupleSlot))
{
case MJEVAL_MATCHABLE:
+
/*
* Test the new inner tuple to see if it matches
* outer.
}
break;
case MJEVAL_NONMATCHABLE:
+
/*
* It contains a NULL and hence can't match any outer
* tuple, so we can skip the comparison and assume the
node->mj_JoinState = EXEC_MJ_NEXTOUTER;
break;
case MJEVAL_ENDOFJOIN:
+
/*
- * No more inner tuples. However, this might be
- * only effective and not physical end of inner plan,
- * so force mj_InnerTupleSlot to null to make sure we
+ * No more inner tuples. However, this might be only
+ * effective and not physical end of inner plan, so
+ * force mj_InnerTupleSlot to null to make sure we
* don't fetch more inner tuples. (We need this hack
* because we are not transiting to a state where the
* inner plan is assumed to be exhausted.)
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
break;
case MJEVAL_NONMATCHABLE:
+
/*
* current inner can't possibly match any outer;
- * better to advance the inner scan than the outer.
+ * better to advance the inner scan than the
+ * outer.
*/
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
break;
node->mj_JoinState = EXEC_MJ_SKIP_TEST;
break;
case MJEVAL_NONMATCHABLE:
+
/*
* current inner can't possibly match any outer;
* better to advance the inner scan than the outer.
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.53 2010/05/08 16:39:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.54 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Keep a trailing null in place, even though it's probably useless for
- * binary data. (Some callers are dealing with text but call this
- * because their input isn't null-terminated.)
+ * binary data. (Some callers are dealing with text but call this because
+ * their input isn't null-terminated.)
*/
str->data[str->len] = '\0';
}
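
The stringinfo.c comment above explains why a binary append still writes a trailing null byte. The toy buffer below shows the same convention: grow to fit the data plus one byte, copy, then terminate. MiniBuf and minibuf_append_binary are invented names, and allocation error checking is elided for brevity.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
    char       *data;
    int         len;
    int         maxlen;
} MiniBuf;

static void
minibuf_init(MiniBuf *buf)
{
    buf->maxlen = 64;
    buf->len = 0;
    buf->data = malloc(buf->maxlen);
    buf->data[0] = '\0';
}

static void
minibuf_append_binary(MiniBuf *buf, const char *data, int datalen)
{
    /* grow until there is room for the data plus the trailing null */
    while (buf->len + datalen + 1 > buf->maxlen)
    {
        buf->maxlen *= 2;
        buf->data = realloc(buf->data, buf->maxlen);
    }
    memcpy(buf->data + buf->len, data, datalen);
    buf->len += datalen;
    buf->data[buf->len] = '\0';     /* harmless for binary, handy for text */
}

int
main(void)
{
    MiniBuf     buf;

    minibuf_init(&buf);
    minibuf_append_binary(&buf, "not null-terminated", 19);
    printf("%s (%d bytes)\n", buf.data, buf.len);
    free(buf.data);
    return 0;
}
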
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.202 2010/06/29 04:12:47 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.203 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*----------------------------------------------------------------
*/
#ifdef ENABLE_SSPI
-typedef SECURITY_STATUS
+typedef SECURITY_STATUS
(WINAPI * QUERY_SECURITY_CONTEXT_TOKEN_FN) (
PCtxtHandle, void **);
static int pg_SSPI_recvauth(Port *port);
auth_failed(Port *port, int status)
{
const char *errstr;
- int errcode_return = ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION;
-
+ int errcode_return = ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION;
+
/*
* If we failed due to EOF from client, just quit; there's no point in
* trying to send a message to the client, and not much point in logging
/*
* An explicit "reject" entry in pg_hba.conf. This report exposes
- * the fact that there's an explicit reject entry, which is perhaps
- * not so desirable from a security standpoint; but the message
- * for an implicit reject could confuse the DBA a lot when the
- * true situation is a match to an explicit reject. And we don't
- * want to change the message for an implicit reject. As noted
- * below, the additional information shown here doesn't expose
- * anything not known to an attacker.
+ * the fact that there's an explicit reject entry, which is
+ * perhaps not so desirable from a security standpoint; but the
+ * message for an implicit reject could confuse the DBA a lot when
+ * the true situation is a match to an explicit reject. And we
+ * don't want to change the message for an implicit reject. As
+ * noted below, the additional information shown here doesn't
+ * expose anything not known to an attacker.
*/
{
char hostinfo[NI_MAXHOST];
{
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
- hostinfo, port->user_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\", %s",
+ hostinfo, port->user_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\"",
- hostinfo, port->user_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("pg_hba.conf rejects replication connection for host \"%s\", user \"%s\"",
+ hostinfo, port->user_name)));
#endif
}
else
{
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name,
- port->database_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name,
+ port->database_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"",
- hostinfo, port->user_name,
- port->database_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("pg_hba.conf rejects connection for host \"%s\", user \"%s\", database \"%s\"",
+ hostinfo, port->user_name,
+ port->database_name)));
#endif
}
break;
{
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\", %s",
- hostinfo, port->user_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\", %s",
+ hostinfo, port->user_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"",
- hostinfo, port->user_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for replication connection from host \"%s\", user \"%s\"",
+ hostinfo, port->user_name)));
#endif
}
else
{
#ifdef USE_SSL
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
- hostinfo, port->user_name,
- port->database_name,
- port->ssl ? _("SSL on") : _("SSL off"))));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\", %s",
+ hostinfo, port->user_name,
+ port->database_name,
+ port->ssl ? _("SSL on") : _("SSL off"))));
#else
ereport(FATAL,
- (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
- errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
- hostinfo, port->user_name,
- port->database_name)));
+ (errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
+ errmsg("no pg_hba.conf entry for host \"%s\", user \"%s\", database \"%s\"",
+ hostinfo, port->user_name,
+ port->database_name)));
#endif
}
break;
timeout.tv_sec = RADIUS_TIMEOUT;
timeout.tv_usec = 0;
FD_ZERO(&fdset);
- FD_SET (sock, &fdset);
+ FD_SET(sock, &fdset);
while (true)
{
else
{
ereport(LOG,
- (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
- receivepacket->code, port->user_name)));
+ (errmsg("RADIUS response has invalid code (%i) for user \"%s\"",
+ receivepacket->code, port->user_name)));
return STATUS_ERROR;
}
}
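
The RADIUS code above waits for the server's reply with select() and a fixed timeout. A stripped-down sketch of that wait, outside the backend, is below; wait_for_reply and the RADIUS_TIMEOUT value are assumptions of the sketch, and the real code also recomputes the remaining timeout when select() is interrupted, which this version skips.

#include <stdio.h>
#include <sys/select.h>
#include <sys/time.h>
#include <sys/socket.h>
#include <unistd.h>

#define RADIUS_TIMEOUT 3            /* seconds; illustrative value */

static int
wait_for_reply(int sock)
{
    fd_set      fdset;
    struct timeval timeout;
    int         r;

    timeout.tv_sec = RADIUS_TIMEOUT;
    timeout.tv_usec = 0;
    FD_ZERO(&fdset);
    FD_SET(sock, &fdset);

    r = select(sock + 1, &fdset, NULL, NULL, &timeout);
    if (r < 0)
    {
        perror("select");
        return -1;                  /* error */
    }
    if (r == 0)
    {
        fprintf(stderr, "timeout waiting for reply\n");
        return 0;                   /* nothing arrived */
    }
    return 1;                       /* socket is readable, go recv() */
}

int
main(void)
{
    int         sock = socket(AF_INET, SOCK_DGRAM, 0);

    if (sock < 0)
    {
        perror("socket");
        return 1;
    }
    /* nothing will ever be sent to this socket, so expect a timeout */
    printf("wait_for_reply returned %d\n", wait_for_reply(sock));
    close(sock);
    return 0;
}
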
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.101 2010/05/26 16:15:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.102 2010/07/06 19:18:56 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
* to verify that the DBA-generated DH parameters file contains
* what we expect it to contain.
*/
-static DH *
+static DH *
load_dh_file(int keylength)
{
FILE *fp;
* To prevent problems if the DH parameters files don't even
* exist, we can load DH parameters hardcoded into this file.
*/
-static DH *
+static DH *
load_dh_buffer(const char *buffer, size_t len)
{
BIO *bio;
* the OpenSSL library can efficiently generate random keys from
* the information provided.
*/
-static DH *
+static DH *
tmp_dh_cb(SSL *s, int is_export, int keylength)
{
DH *r = NULL;
initialize_SSL(void)
{
struct stat buf;
+
STACK_OF(X509_NAME) *root_cert_list = NULL;
if (!SSL_context)
ROOT_CERT_FILE)));
}
else if (SSL_CTX_load_verify_locations(SSL_context, ROOT_CERT_FILE, NULL) != 1 ||
- (root_cert_list = SSL_load_client_CA_file(ROOT_CERT_FILE)) == NULL)
+ (root_cert_list = SSL_load_client_CA_file(ROOT_CERT_FILE)) == NULL)
{
/*
* File was there, but we could not load it. This means the file is
ssl_loaded_verify_locations = true;
}
- /*
+ /*
* Tell OpenSSL to send the list of root certs we trust to clients in
* CertificateRequests. This lets a client with a keystore select the
* appropriate client certificate to send to us.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.208 2010/06/03 19:29:38 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.209 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("hostssl not supported on this platform"),
- errhint("Compile with --with-openssl to use SSL connections."),
+ errhint("Compile with --with-openssl to use SSL connections."),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
errmsg("IP address and mask do not match"),
- errcontext("line %d of configuration file \"%s\"",
- line_num, HbaFileName)));
+ errcontext("line %d of configuration file \"%s\"",
+ line_num, HbaFileName)));
return false;
}
}
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("gssapi authentication is not supported on local sockets"),
+ errmsg("gssapi authentication is not supported on local sockets"),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
}
+
/*
- * SSPI authentication can never be enabled on ctLocal connections, because
- * it's only supported on Windows, where ctLocal isn't supported.
+ * SSPI authentication can never be enabled on ctLocal connections,
+ * because it's only supported on Windows, where ctLocal isn't supported.
*/
{
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("unrecognized authentication option name: \"%s\"",
- token),
+ errmsg("unrecognized authentication option name: \"%s\"",
+ token),
errcontext("line %d of configuration file \"%s\"",
line_num, HbaFileName)));
return false;
pg_regerror(r, &re, errstr, sizeof(errstr));
ereport(LOG,
(errcode(ERRCODE_INVALID_REGULAR_EXPRESSION),
- errmsg("regular expression match for \"%s\" failed: %s",
- file_ident_user + 1, errstr)));
+ errmsg("regular expression match for \"%s\" failed: %s",
+ file_ident_user + 1, errstr)));
*error_p = true;
}
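
The hba.c hunks above include the error path for a failed regular-expression ident match. The standalone example below walks the same compile / execute / report flow using the POSIX <regex.h> API as a stand-in for the backend's pg_regcomp/pg_regexec/pg_regerror; the sample map entry and user name are made up.

#include <stdio.h>
#include <regex.h>

int
main(void)
{
    regex_t     re;
    regmatch_t  match[2];
    char        errstr[100];
    int         r;
    const char *system_user = "/^(.*)@example\\.com$";
    const char *ident_user = "alice@example.com";

    /* the leading '/' marks a regex entry in the ident map; skip it */
    r = regcomp(&re, system_user + 1, REG_EXTENDED);
    if (r != 0)
    {
        regerror(r, &re, errstr, sizeof(errstr));
        fprintf(stderr, "invalid regular expression \"%s\": %s\n",
                system_user + 1, errstr);
        return 1;
    }

    r = regexec(&re, ident_user, 2, match, 0);
    if (r == 0)
        printf("matched; \\1 spans bytes %d..%d\n",
               (int) match[1].rm_so, (int) match[1].rm_eo);
    else if (r == REG_NOMATCH)
        printf("no match\n");
    else
    {
        regerror(r, &re, errstr, sizeof(errstr));
        fprintf(stderr, "regular expression match for \"%s\" failed: %s\n",
                ident_user, errstr);
    }
    regfree(&re);
    return 0;
}
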
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.217 2010/04/19 00:55:25 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.218 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
cpu_operator_cost * inner_path_rows * rescanratio;
/*
- * Prefer materializing if it looks cheaper, unless the user has asked
- * to suppress materialization.
+ * Prefer materializing if it looks cheaper, unless the user has asked to
+ * suppress materialization.
*/
if (enable_material && mat_inner_cost < bare_inner_cost)
path->materialize_inner = true;
* selected as the input of a mergejoin, and they don't support
* mark/restore at present.
*
- * We don't test the value of enable_material here, because materialization
- * is required for correctness in this case, and turning it off does not
- * entitle us to deliver an invalid plan.
+ * We don't test the value of enable_material here, because
+ * materialization is required for correctness in this case, and turning
+ * it off does not entitle us to deliver an invalid plan.
*/
else if (innersortkeys == NIL &&
!ExecSupportsMarkRestore(inner_path->pathtype))
* We don't try to adjust the cost estimates for this consideration,
* though.
*
- * Since materialization is a performance optimization in this case, rather
- * than necessary for correctness, we skip it if enable_material is off.
+ * Since materialization is a performance optimization in this case,
+ * rather than necessary for correctness, we skip it if enable_material is
+ * off.
*/
else if (enable_material && innersortkeys != NIL &&
relation_byte_size(inner_path_rows, inner_path->parent->width) >
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/analyzejoins.c,v 1.2 2010/05/23 16:34:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/analyzejoins.c,v 1.3 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Check for relations that don't actually need to be joined at all,
* and remove them from the query.
*
- * We are passed the current joinlist and return the updated list. Other
+ * We are passed the current joinlist and return the updated list. Other
* data structures that have to be updated are accessible via "root".
*/
List *
ListCell *lc;
/*
- * We are only interested in relations that are left-joined to, so we
- * can scan the join_info_list to find them easily.
+ * We are only interested in relations that are left-joined to, so we can
+ * scan the join_info_list to find them easily.
*/
restart:
foreach(lc, root->join_info_list)
{
SpecialJoinInfo *sjinfo = (SpecialJoinInfo *) lfirst(lc);
- int innerrelid;
- int nremoved;
+ int innerrelid;
+ int nremoved;
/* Skip if not removable */
if (!join_is_removable(root, sjinfo))
* Restart the scan. This is necessary to ensure we find all
* removable joins independently of ordering of the join_info_list
* (note that removal of attr_needed bits may make a join appear
- * removable that did not before). Also, since we just deleted the
+ * removable that did not before). Also, since we just deleted the
* current list cell, we'd have to have some kluge to continue the
* list scan anyway.
*/
if (otherrel == NULL)
continue;
- Assert(otherrel->relid == rti); /* sanity check on array */
+ Assert(otherrel->relid == rti); /* sanity check on array */
/* no point in processing target rel itself */
if (otherrel == rel)
/*
* Likewise remove references from SpecialJoinInfo data structures.
*
- * This is relevant in case the outer join we're deleting is nested
- * inside other outer joins: the upper joins' relid sets have to be
- * adjusted. The RHS of the target outer join will be made empty here,
- * but that's OK since caller will delete that SpecialJoinInfo entirely.
+ * This is relevant in case the outer join we're deleting is nested inside
+ * other outer joins: the upper joins' relid sets have to be adjusted.
+ * The RHS of the target outer join will be made empty here, but that's OK
+ * since caller will delete that SpecialJoinInfo entirely.
*/
foreach(l, root->join_info_list)
{
PlaceHolderInfo *phinfo = (PlaceHolderInfo *) lfirst(l);
phinfo->ph_eval_at = bms_del_member(phinfo->ph_eval_at, relid);
- if (bms_is_empty(phinfo->ph_eval_at)) /* oops, belay that */
+ if (bms_is_empty(phinfo->ph_eval_at)) /* oops, belay that */
phinfo->ph_eval_at = bms_add_member(phinfo->ph_eval_at, relid);
phinfo->ph_needed = bms_del_member(phinfo->ph_needed, relid);
else if (IsA(jlnode, List))
{
/* Recurse to handle subproblem */
- List *sublist;
+ List *sublist;
sublist = remove_rel_from_joinlist((List *) jlnode,
relid, nremoved);
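
remove_useless_joins, above, restarts its scan of join_info_list after each removal because deleting the current cell invalidates the ongoing scan. The same delete-and-restart pattern on an ordinary singly linked list looks like this; Node, push, remove_removable, and the even-number test are all illustrative stand-ins for the planner data structures.

#include <stdio.h>
#include <stdlib.h>

typedef struct Node
{
    int         val;
    struct Node *next;
} Node;

static Node *
push(Node *head, int val)
{
    Node       *n = malloc(sizeof(Node));

    n->val = val;
    n->next = head;
    return n;
}

static Node *
remove_removable(Node *head)
{
restart:
    for (Node *n = head, *prev = NULL; n != NULL; prev = n, n = n->next)
    {
        if (n->val % 2 == 0)        /* stand-in for "join is removable" */
        {
            if (prev)
                prev->next = n->next;
            else
                head = n->next;
            free(n);
            /* the list just changed under us: start over from the top */
            goto restart;
        }
    }
    return head;
}

int
main(void)
{
    Node       *list = NULL;

    for (int i = 1; i <= 6; i++)
        list = push(list, i);
    list = remove_removable(list);
    for (Node *n = list; n != NULL; n = n->next)
        printf("%d ", n->val);
    printf("\n");
    return 0;
}
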
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.52 2010/05/10 16:25:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.53 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
RowCompareExpr *rc = (RowCompareExpr *) qual;
/*
- * Examine just the first column of the rowcompare, which is
- * what determines its placement in the overall qual list.
+ * Examine just the first column of the rowcompare, which is what
+ * determines its placement in the overall qual list.
*/
leftop = (Expr *) linitial(rc->largs);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.118 2010/03/28 22:59:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.119 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
fix_placeholder_eval_levels(root);
/*
- * Remove any useless outer joins. Ideally this would be done during
+ * Remove any useless outer joins. Ideally this would be done during
* jointree preprocessing, but the necessary information isn't available
* until we've built baserel data structures and classified qual clauses.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.72 2010/06/21 00:14:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.73 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
&colnames, &fields);
/* Adjust the generated per-field Vars, but don't insert PHVs */
rcon->need_phvs = false;
- context->sublevels_up = 0; /* to match the expandRTE output */
+ context->sublevels_up = 0; /* to match the expandRTE output */
fields = (List *) replace_rte_variables_mutator((Node *) fields,
context);
rcon->need_phvs = save_need_phvs;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.182 2010/05/11 15:31:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.183 2010/07/06 19:18:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* if this is the parent table, leave copyObject's result alone.
*
* Note: we need to do this even though the executor won't run any
- * permissions checks on the child RTE. The modifiedCols bitmap
- * may be examined for trigger-firing purposes.
+ * permissions checks on the child RTE. The modifiedCols bitmap may
+ * be examined for trigger-firing purposes.
*/
if (childOID != parentOID)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.7 2010/03/28 22:59:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/placeholder.c,v 1.8 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* If any placeholder can be computed at a base rel and is needed above it,
* add it to that rel's targetlist. We have to do this separately from
* fix_placeholder_eval_levels() because join removal happens in between,
- * and can change the ph_eval_at sets. There is essentially the same logic
+ * and can change the ph_eval_at sets. There is essentially the same logic
* in add_placeholders_to_joinrel, but we can't do that part until joinrels
* are formed.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.255 2010/06/30 18:10:23 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.256 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* ... and hand off to ParseFuncOrColumn */
result = ParseFuncOrColumn(pstate,
- fn->funcname,
- targs,
- fn->agg_order,
- fn->agg_star,
- fn->agg_distinct,
- fn->func_variadic,
- fn->over,
- false,
- fn->location);
+ fn->funcname,
+ targs,
+ fn->agg_order,
+ fn->agg_star,
+ fn->agg_distinct,
+ fn->func_variadic,
+ fn->over,
+ false,
+ fn->location);
/*
* pg_get_expr() is a system function that exposes the expression
- * deparsing functionality in ruleutils.c to users. Very handy, but
- * it was later realized that the functions in ruleutils.c don't check
- * the input rigorously, assuming it to come from system catalogs and
- * to therefore be valid. That makes it easy for a user to crash the
- * backend by passing a maliciously crafted string representation of
- * an expression to pg_get_expr().
+ * deparsing functionality in ruleutils.c to users. Very handy, but it was
+ * later realized that the functions in ruleutils.c don't check the input
+ * rigorously, assuming it to come from system catalogs and to therefore
+ * be valid. That makes it easy for a user to crash the backend by passing
+ * a maliciously crafted string representation of an expression to
+ * pg_get_expr().
*
* There's a lot of code in ruleutils.c, so it's not feasible to add
- * water-proof input checking after the fact. Even if we did it once,
- * it would need to be taken into account in any future patches too.
+ * water-proof input checking after the fact. Even if we did it once, it
+ * would need to be taken into account in any future patches too.
*
* Instead, we restrict pg_get_expr() to allow input only from system
* catalogs. This is a hack, but it's the most robust and easiest
* to backpatch way of plugging the vulnerability.
*
* This is transparent to the typical usage pattern of
- * "pg_get_expr(systemcolumn, ...)", but will break
- * "pg_get_expr('foo', ...)", even if 'foo' is a valid expression fetched
- * earlier from a system catalog. Hopefully there's isn't many clients
- * doing that out there.
+ * "pg_get_expr(systemcolumn, ...)", but will break "pg_get_expr('foo',
+ * ...)", even if 'foo' is a valid expression fetched earlier from a
+ * system catalog. Hopefully there aren't many clients doing that out
+ * there.
*/
- if (result && IsA(result, FuncExpr) && !superuser())
+ if (result && IsA(result, FuncExpr) &&!superuser())
{
- FuncExpr *fe = (FuncExpr *) result;
+ FuncExpr *fe = (FuncExpr *) result;
+
if (fe->funcid == F_PG_GET_EXPR || fe->funcid == F_PG_GET_EXPR_EXT)
{
- Expr *arg = linitial(fe->args);
- bool allowed = false;
+ Expr *arg = linitial(fe->args);
+ bool allowed = false;
/*
- * Check that the argument came directly from one of the
- * allowed system catalog columns
+ * Check that the argument came directly from one of the allowed
+ * system catalog columns
*/
if (IsA(arg, Var))
{
- Var *var = (Var *) arg;
+ Var *var = (Var *) arg;
RangeTblEntry *rte;
rte = GetRTEByRangeTablePosn(pstate,
var->varno, var->varlevelsup);
- switch(rte->relid)
+ switch (rte->relid)
{
case IndexRelationId:
if (var->varattno == Anum_pg_index_indexprs ||
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.41 2010/05/09 02:15:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.42 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* We avoid using %.*s here because it can misbehave if the data
* is not valid in what libc thinks is the prevailing encoding.
*/
- char buf[NAMEDATALEN];
+ char buf[NAMEDATALEN];
memcpy(buf, ident, len);
buf[len] = '\0';
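
The scansup.c hunk above copies a possibly non-null-terminated, possibly mis-encoded identifier prefix into a local buffer rather than printing it with %.*s. A self-contained version of that pattern, with report_truncated as an invented wrapper and NAMEDATALEN set to its usual default:

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64

static void
report_truncated(const char *ident, int len)
{
    char        buf[NAMEDATALEN];

    if (len >= NAMEDATALEN)         /* defensive clamp for the sketch */
        len = NAMEDATALEN - 1;
    memcpy(buf, ident, len);
    buf[len] = '\0';
    printf("identifier \"%s\" will be truncated to \"%s\"\n", ident, buf);
}

int
main(void)
{
    report_truncated("a_rather_long_identifier", 8);
    return 0;
}
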
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.56 2010/05/01 22:46:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.57 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return NULL;
/*
- * Some BSD-derived kernels are known to return EINVAL, not EEXIST,
- * if there is an existing segment but it's smaller than "size"
- * (this is a result of poorly-thought-out ordering of error tests).
- * To distinguish between collision and invalid size in such cases,
- * we make a second try with size = 0. These kernels do not test
- * size against SHMMIN in the preexisting-segment case, so we will
- * not get EINVAL a second time if there is such a segment.
+ * Some BSD-derived kernels are known to return EINVAL, not EEXIST, if
+ * there is an existing segment but it's smaller than "size" (this is
+ * a result of poorly-thought-out ordering of error tests). To
+ * distinguish between collision and invalid size in such cases, we
+ * make a second try with size = 0. These kernels do not test size
+ * against SHMMIN in the preexisting-segment case, so we will not get
+ * EINVAL a second time if there is such a segment.
*/
if (errno == EINVAL)
{
- int save_errno = errno;
+ int save_errno = errno;
shmid = shmget(memKey, 0, IPC_CREAT | IPC_EXCL | IPCProtection);
{
/*
* On most platforms we cannot get here because SHMMIN is
- * greater than zero. However, if we do succeed in creating
- * a zero-size segment, free it and then fall through to
- * report the original error.
+ * greater than zero. However, if we do succeed in creating a
+ * zero-size segment, free it and then fall through to report
+ * the original error.
*/
if (shmctl(shmid, IPC_RMID, NULL) < 0)
elog(LOG, "shmctl(%d, %d, 0) failed: %m",
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.26 2010/02/26 02:00:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.27 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
r = WSASend(writefds->fd_array[i], &buf, 1, &sent, 0, NULL, NULL);
if (r == 0) /* Completed - means things are fine! */
- FD_SET (writefds->fd_array[i], &outwritefds);
+ FD_SET(writefds->fd_array[i], &outwritefds);
else
{ /* Not completed */
* Not completed, and not just "would block", so an error
* occurred
*/
- FD_SET (writefds->fd_array[i], &outwritefds);
+ FD_SET(writefds->fd_array[i], &outwritefds);
}
}
if (outwritefds.fd_count > 0)
(resEvents.lNetworkEvents & FD_ACCEPT) ||
(resEvents.lNetworkEvents & FD_CLOSE))
{
- FD_SET (sockets[i], &outreadfds);
+ FD_SET(sockets[i], &outreadfds);
nummatches++;
}
if ((resEvents.lNetworkEvents & FD_WRITE) ||
(resEvents.lNetworkEvents & FD_CLOSE))
{
- FD_SET (sockets[i], &outwritefds);
+ FD_SET(sockets[i], &outwritefds);
nummatches++;
}
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.18 2010/01/02 16:57:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.19 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct itimerval value;
HANDLE event;
CRITICAL_SECTION crit_sec;
-} timerCA;
+} timerCA;
static timerCA timerCommArea;
static HANDLE timerThreadHandle = INVALID_HANDLE_VALUE;
*
* Copyright (c) 2001-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.203 2010/03/24 16:07:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.204 2010/07/06 19:18:57 momjian Exp $
* ----------
*/
#include "postgres.h"
for (;;) /* need a loop to handle EINTR */
{
FD_ZERO(&rset);
- FD_SET (pgStatSock, &rset);
+ FD_SET(pgStatSock, &rset);
tv.tv_sec = 0;
tv.tv_usec = 500000;
got_data = (input_fd.revents != 0);
#else /* !HAVE_POLL */
- FD_SET (pgStatSock, &rfds);
+ FD_SET(pgStatSock, &rfds);
/*
* timeout struct is modified by select() on some operating systems,
last_statwrite = globalStats.stats_timestamp;
/*
- * If there is clock skew between backends and the collector, we
- * could receive a stats request time that's in the future. If so,
- * complain and reset last_statrequest. Resetting ensures that no
- * inquiry message can cause more than one stats file write to occur.
+ * If there is clock skew between backends and the collector, we could
+ * receive a stats request time that's in the future. If so, complain
+ * and reset last_statrequest. Resetting ensures that no inquiry
+ * message can cause more than one stats file write to occur.
*/
if (last_statrequest > last_statwrite)
{
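The collector loop above depends on two portability details: select() must be retried after EINTR, and the timeout struct has to be re-initialized on every iteration because some systems modify it in place. A generic standalone sketch of that pattern follows; the socket argument is a placeholder, not pgStatSock.

#include <errno.h>
#include <stdio.h>
#include <sys/select.h>

static int
wait_for_data(int sock)
{
	for (;;)					/* need a loop to handle EINTR */
	{
		fd_set		rfds;
		struct timeval tv;
		int			rc;

		FD_ZERO(&rfds);
		FD_SET(sock, &rfds);
		tv.tv_sec = 0;
		tv.tv_usec = 500000;	/* 500 ms, re-set each time around */

		rc = select(sock + 1, &rfds, NULL, NULL, &tv);
		if (rc < 0)
		{
			if (errno == EINTR)
				continue;		/* interrupted by a signal: retry */
			return -1;
		}
		return rc;				/* 0 = timeout, 1 = data ready */
	}
}

int
main(void)
{
	printf("%d\n", wait_for_data(0));	/* fd 0 = stdin, just for the demo */
	return 0;
}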
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.613 2010/06/24 16:40:45 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.614 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
*
*
* When the startup process is ready to start archive recovery, it signals the
* postmaster, and we switch to PM_RECOVERY state. The background writer is
- * launched, while the startup process continues applying WAL. If Hot Standby
+ * launched, while the startup process continues applying WAL. If Hot Standby
* is enabled, then, after reaching a consistent point in WAL redo, startup
* process signals us again, and we switch to PM_HOT_STANDBY state and
* begin accepting connections to perform read-only queries. When archive
static PMState pmState = PM_INIT;
-static bool ReachedNormalRunning = false; /* T if we've reached PM_RUN */
+static bool ReachedNormalRunning = false; /* T if we've reached PM_RUN */
bool ClientAuthInProgress = false; /* T during new-client
* authentication */
HANDLE waitHandle;
HANDLE procHandle;
DWORD procId;
-} win32_deadchild_waitinfo;
+} win32_deadchild_waitinfo;
HANDLE PostmasterHandle;
#endif
SOCKET origsocket; /* Original socket value, or PGINVALID_SOCKET
* if not a socket */
WSAPROTOCOL_INFO wsainfo;
-} InheritableSocket;
+} InheritableSocket;
#else
typedef int InheritableSocket;
#endif
char my_exec_path[MAXPGPATH];
char pkglib_path[MAXPGPATH];
char ExtraOptions[MAXPGPATH];
-} BackendParameters;
+} BackendParameters;
static void read_backend_variables(char *id, Port *port);
-static void restore_backend_variables(BackendParameters *param, Port *port);
+static void restore_backend_variables(BackendParameters * param, Port *port);
#ifndef WIN32
-static bool save_backend_variables(BackendParameters *param, Port *port);
+static bool save_backend_variables(BackendParameters * param, Port *port);
#else
-static bool save_backend_variables(BackendParameters *param, Port *port,
+static bool save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid);
#endif
if (fd == PGINVALID_SOCKET)
break;
- FD_SET (fd, rmask);
+ FD_SET(fd, rmask);
if (fd > maxsock)
maxsock = fd;
/* and the walwriter too */
if (WalWriterPID != 0)
signal_child(WalWriterPID, SIGTERM);
+
/*
* If we're in recovery, we can't kill the startup process
* right away, because at present doing so does not release
* Terminate backup mode to avoid recovery after a clean fast
* shutdown. Since a backup can only be taken during normal
* running (and not, for example, while running under Hot Standby)
- * it only makes sense to do this if we reached normal running.
- * If we're still in recovery, the backup file is one we're
+ * it only makes sense to do this if we reached normal running. If
+ * we're still in recovery, the backup file is one we're
* recovering *from*, and we must keep it around so that recovery
* restarts from the right place.
*/
{
if (remote_port[0])
ereport(LOG,
- (errmsg("connection received: host=%s port=%s",
- remote_host,
- remote_port)));
+ (errmsg("connection received: host=%s port=%s",
+ remote_host,
+ remote_port)));
else
ereport(LOG,
- (errmsg("connection received: host=%s",
- remote_host)));
+ (errmsg("connection received: host=%s",
+ remote_host)));
}
/*
#define read_inheritable_socket(dest, src) (*(dest) = *(src))
#else
static bool write_duplicated_handle(HANDLE *dest, HANDLE src, HANDLE child);
-static bool write_inheritable_socket(InheritableSocket *dest, SOCKET src,
+static bool write_inheritable_socket(InheritableSocket * dest, SOCKET src,
pid_t childPid);
-static void read_inheritable_socket(SOCKET *dest, InheritableSocket *src);
+static void read_inheritable_socket(SOCKET * dest, InheritableSocket * src);
#endif
/* Save critical backend variables into the BackendParameters struct */
#ifndef WIN32
static bool
-save_backend_variables(BackendParameters *param, Port *port)
+save_backend_variables(BackendParameters * param, Port *port)
#else
static bool
-save_backend_variables(BackendParameters *param, Port *port,
+save_backend_variables(BackendParameters * param, Port *port,
HANDLE childProcess, pid_t childPid)
#endif
{
* straight socket inheritance.
*/
static bool
-write_inheritable_socket(InheritableSocket *dest, SOCKET src, pid_t childpid)
+write_inheritable_socket(InheritableSocket * dest, SOCKET src, pid_t childpid)
{
dest->origsocket = src;
if (src != 0 && src != PGINVALID_SOCKET)
* Read a duplicate socket structure back, and get the socket descriptor.
*/
static void
-read_inheritable_socket(SOCKET *dest, InheritableSocket *src)
+read_inheritable_socket(SOCKET * dest, InheritableSocket * src)
{
SOCKET s;
/* Restore critical backend variables from the BackendParameters struct */
static void
-restore_backend_variables(BackendParameters *param, Port *port)
+restore_backend_variables(BackendParameters * param, Port *port)
{
	memcpy(port, &param->port, sizeof(Port));
	read_inheritable_socket(&port->sock, &param->portsocket);
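The save_backend_variables()/restore_backend_variables() pair above follows a simple pattern: flatten everything the child needs into one plain struct, hand it across the process boundary, and copy the fields back into globals on the other side. Below is a deliberately simplified, hypothetical sketch of that idea using an ordinary file as the transport; the struct fields, file name, and global names are invented, and the real postmaster uses its own platform-specific channels.

#include <stdio.h>
#include <string.h>

#define FAKE_MAXPGPATH 1024

/* Invented stand-ins for a couple of backend globals. */
static int	MyCancelKey;
static char DataDir[FAKE_MAXPGPATH];

/* One flat struct carrying everything the child should inherit. */
typedef struct FakeBackendParameters
{
	int			cancel_key;
	char		data_dir[FAKE_MAXPGPATH];
} FakeBackendParameters;

/* Parent side: flatten the globals and write the struct out. */
static int
save_vars(const char *fname)
{
	FakeBackendParameters param;
	FILE	   *fp = fopen(fname, "wb");

	if (!fp)
		return 0;
	memset(&param, 0, sizeof(param));
	param.cancel_key = MyCancelKey;
	snprintf(param.data_dir, sizeof(param.data_dir), "%s", DataDir);
	fwrite(&param, sizeof(param), 1, fp);
	fclose(fp);
	return 1;
}

/* Child side: read the struct back and repopulate the globals. */
static int
restore_vars(const char *fname)
{
	FakeBackendParameters param;
	FILE	   *fp = fopen(fname, "rb");

	if (!fp)
		return 0;
	if (fread(&param, sizeof(param), 1, fp) != 1)
	{
		fclose(fp);
		return 0;
	}
	MyCancelKey = param.cancel_key;
	memcpy(DataDir, param.data_dir, sizeof(DataDir));
	fclose(fp);
	return 1;
}

int
main(void)
{
	MyCancelKey = 42;
	snprintf(DataDir, sizeof(DataDir), "/tmp/pgdata");
	save_vars("backend_vars.tmp");

	MyCancelKey = 0;
	DataDir[0] = '\0';
	restore_vars("backend_vars.tmp");
	printf("restored: key=%d dir=%s\n", MyCancelKey, DataDir);
	return 0;
}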
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.57 2010/04/16 09:51:49 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.58 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Wait for some data, timing out after 1 second
*/
FD_ZERO(&rfds);
- FD_SET (syslogPipe[0], &rfds);
+ FD_SET(syslogPipe[0], &rfds);
timeout.tv_sec = 1;
timeout.tv_usec = 0;
* detect pipe EOF. The main thread just wakes up once a second to
* check for SIGHUP and rotation conditions.
*
- * Server code isn't generally thread-safe, so we ensure that only
- * one of the threads is active at a time by entering the critical
- * section whenever we're not sleeping.
+ * Server code isn't generally thread-safe, so we ensure that only one
+ * of the threads is active at a time by entering the critical section
+ * whenever we're not sleeping.
*/
LeaveCriticalSection(&sysloggerSection);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.11 2010/06/11 10:13:09 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/replication/libpqwalreceiver/libpqwalreceiver.c,v 1.12 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char cmd[64];
/*
- * Connect using deliberately undocumented parameter: replication.
- * The database name is ignored by the server in replication mode, but
- * specify "replication" for .pgpass lookup.
+ * Connect using deliberately undocumented parameter: replication. The
+ * database name is ignored by the server in replication mode, but specify
+ * "replication" for .pgpass lookup.
*/
snprintf(conninfo_repl, sizeof(conninfo_repl),
"%s dbname=replication replication=true",
justconnected = true;
ereport(LOG,
- (errmsg("streaming replication successfully connected to primary")));
+ (errmsg("streaming replication successfully connected to primary")));
return true;
}
struct timeval *ptr_timeout;
FD_ZERO(&input_mask);
- FD_SET (PQsocket(streamConn), &input_mask);
+ FD_SET(PQsocket(streamConn), &input_mask);
if (timeout_ms < 0)
ptr_timeout = NULL;
static PGresult *
libpqrcv_PQexec(const char *query)
{
- PGresult *result = NULL;
- PGresult *lastResult = NULL;
+ PGresult *result = NULL;
+ PGresult *lastResult = NULL;
/*
- * PQexec() silently discards any prior query results on the
- * connection. This is not required for walreceiver since it's
- * expected that walsender won't generate any such junk results.
+ * PQexec() silently discards any prior query results on the connection.
+ * This is not required for walreceiver since it's expected that walsender
+ * won't generate any such junk results.
*/
/*
- * Submit a query. Since we don't use non-blocking mode, this also
- * can block. But its risk is relatively small, so we ignore that
- * for now.
+ * Submit a query. Since we don't use non-blocking mode, this also can
+ * block. But its risk is relatively small, so we ignore that for now.
*/
if (!PQsendQuery(streamConn, query))
return NULL;
for (;;)
{
/*
- * Receive data until PQgetResult is ready to get the result
- * without blocking.
+ * Receive data until PQgetResult is ready to get the result without
+ * blocking.
*/
while (PQisBusy(streamConn))
{
/*
* We don't need to break down the sleep into smaller increments,
* and check for interrupts after each nap, since we can just
- * elog(FATAL) within SIGTERM signal handler if the signal
- * arrives in the middle of establishment of replication connection.
+			 * elog(FATAL) within the SIGTERM signal handler if the signal arrives
+			 * in the middle of establishing the replication connection.
*/
if (!libpq_select(-1))
continue; /* interrupted */
}
/*
- * Emulate the PQexec()'s behavior of returning the last result
- * when there are many.
- * Since walsender will never generate multiple results, we skip
- * the concatenation of error messages.
+	 * Emulate PQexec()'s behavior of returning the last result when
+ * there are many. Since walsender will never generate multiple
+ * results, we skip the concatenation of error messages.
*/
result = PQgetResult(streamConn);
if (result == NULL)
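The loop above (keep pumping the connection until PQgetResult() would not block, and retain only the last PGresult) can be reproduced with nothing but documented libpq calls. A standalone sketch, with a placeholder connection string and query; a production version would also wait on PQsocket() rather than spinning on PQconsumeInput().

#include <stdio.h>
#include <libpq-fe.h>

static PGresult *
exec_keep_last(PGconn *conn, const char *query)
{
	PGresult   *result = NULL;
	PGresult   *lastResult = NULL;

	if (!PQsendQuery(conn, query))
		return NULL;

	for (;;)
	{
		/* Pump the connection until PQgetResult() won't block.  Note this
		 * busy-waits; real code would select() on PQsocket() first. */
		while (PQisBusy(conn))
		{
			if (!PQconsumeInput(conn))
				return lastResult;	/* connection trouble */
		}

		result = PQgetResult(conn);
		if (result == NULL)
			break;				/* query fully processed */

		/* Keep only the most recent result, like PQexec() does. */
		if (lastResult)
			PQclear(lastResult);
		lastResult = result;
	}
	return lastResult;
}

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "%s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}
	res = exec_keep_last(conn, "SELECT 1");
	if (res)
	{
		printf("status: %s\n", PQresStatus(PQresultStatus(res)));
		PQclear(res);
	}
	PQfinish(conn);
	return 0;
}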
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.15 2010/07/03 20:43:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/replication/walreceiver.c,v 1.16 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
char conninfo[MAXCONNINFO];
XLogRecPtr startpoint;
+
/* use volatile pointer to prevent code rearrangement */
volatile WalRcvData *walrcv = WalRcv;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.6 2010/07/03 20:43:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/replication/walreceiverfuncs.c,v 1.7 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Returns the last+1 byte position that walreceiver has written.
*
* Optionally, returns the previous chunk start, that is the first byte
- * written in the most recent walreceiver flush cycle. Callers not
+ * written in the most recent walreceiver flush cycle. Callers not
* interested in that value may pass NULL for latestChunkStart.
*/
XLogRecPtr
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.27 2010/06/17 16:41:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/replication/walsender.c,v 1.28 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int max_wal_senders = 0; /* the maximum number of concurrent walsenders */
int WalSndDelay = 200; /* max sleep time between some actions */
-#define NAPTIME_PER_CYCLE 100000L /* max sleep time between cycles (100ms) */
+#define NAPTIME_PER_CYCLE 100000L /* max sleep time between cycles
+ * (100ms) */
/*
* These variables are used similarly to openLogFile/Id/Seg/Off,
* NOTE: This only checks the current value of
* wal_level. Even if the current setting is not
* 'minimal', there can be old WAL in the pg_xlog
- * directory that was created with 'minimal'.
- * So this is not bulletproof, the purpose is
- * just to give a user-friendly error message that
- * hints how to configure the system correctly.
+ * directory that was created with 'minimal'. So this
+										 * is not bulletproof; the purpose is just to give a
+ * user-friendly error message that hints how to
+ * configure the system correctly.
*/
if (wal_level == WAL_LEVEL_MINIMAL)
ereport(FATAL,
/* Loop forever, unless we get an error */
for (;;)
{
- long remain; /* remaining time (us) */
+ long remain; /* remaining time (us) */
/*
* Emergency bailout if postmaster has died. This is to avoid the
*
* On some platforms, signals won't interrupt the sleep. To ensure we
* respond reasonably promptly when someone signals us, break down the
- * sleep into NAPTIME_PER_CYCLE increments, and check for
- * interrupts after each nap.
+ * sleep into NAPTIME_PER_CYCLE increments, and check for interrupts
+ * after each nap.
*/
if (caughtup)
{
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("number of requested standby connections "
- "exceeds max_wal_senders (currently %d)",
- max_wal_senders)));
+ "exceeds max_wal_senders (currently %d)",
+ max_wal_senders)));
/* Arrange to clean up at walsender exit */
on_shmem_exit(WalSndKill, 0);
if (sendFile < 0)
{
/*
- * If the file is not found, assume it's because the
- * standby asked for a too old WAL segment that has already
- * been removed or recycled.
+ * If the file is not found, assume it's because the standby
+ * asked for a too old WAL segment that has already been
+ * removed or recycled.
*/
if (errno == ENOENT)
{
- char filename[MAXFNAMELEN];
+ char filename[MAXFNAMELEN];
+
XLogFileName(filename, ThisTimeLineID, sendId, sendSeg);
ereport(ERROR,
(errcode_for_file_access(),
}
/*
- * After reading into the buffer, check that what we read was valid.
- * We do this after reading, because even though the segment was present
- * when we opened it, it might get recycled or removed while we read it.
- * The read() succeeds in that case, but the data we tried to read might
+ * After reading into the buffer, check that what we read was valid. We do
+ * this after reading, because even though the segment was present when we
+ * opened it, it might get recycled or removed while we read it. The
+ * read() succeeds in that case, but the data we tried to read might
* already have been overwritten with new WAL records.
*/
XLogGetLastRemoved(&lastRemovedLog, &lastRemovedSeg);
if (log < lastRemovedLog ||
(log == lastRemovedLog && seg <= lastRemovedSeg))
{
- char filename[MAXFNAMELEN];
+ char filename[MAXFNAMELEN];
+
XLogFileName(filename, ThisTimeLineID, log, seg);
ereport(ERROR,
(errcode_for_file_access(),
WalDataMessageHeader msghdr;
/*
- * Attempt to send all data that's already been written out and fsync'd
- * to disk. We cannot go further than what's been written out given the
+ * Attempt to send all data that's already been written out and fsync'd to
+ * disk. We cannot go further than what's been written out given the
* current implementation of XLogRead(). And in any case it's unsafe to
* send WAL that is not securely down to disk on the master: if the master
* subsequently crashes and restarts, slaves must not have applied any WAL
* MAX_SEND_SIZE bytes to send, send everything. Otherwise send
* MAX_SEND_SIZE bytes, but round back to logfile or page boundary.
*
- * The rounding is not only for performance reasons. Walreceiver
- * relies on the fact that we never split a WAL record across two
- * messages. Since a long WAL record is split at page boundary into
- * continuation records, page boundary is always a safe cut-off point.
- * We also assume that SendRqstPtr never points to the middle of a WAL
- * record.
+ * The rounding is not only for performance reasons. Walreceiver relies on
+ * the fact that we never split a WAL record across two messages. Since a
+ * long WAL record is split at page boundary into continuation records,
+ * page boundary is always a safe cut-off point. We also assume that
+ * SendRqstPtr never points to the middle of a WAL record.
*/
startptr = sentPtr;
if (startptr.xrecoff >= XLogFileSize)
{
/*
- * crossing a logid boundary, skip the non-existent last log
- * segment in previous logical log file.
+ * crossing a logid boundary, skip the non-existent last log segment
+ * in previous logical log file.
*/
startptr.xlogid += 1;
startptr.xrecoff = 0;
XLogRead(msgbuf + 1 + sizeof(WalDataMessageHeader), startptr, nbytes);
/*
- * We fill the message header last so that the send timestamp is taken
- * as late as possible.
+ * We fill the message header last so that the send timestamp is taken as
+ * late as possible.
*/
msghdr.dataStart = startptr;
msghdr.walEnd = SendRqstPtr;
}
return oldest;
}
+
#endif
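Several comments above describe splitting a long sleep into NAPTIME_PER_CYCLE slices so that signals are noticed promptly even on platforms where they do not interrupt the sleep. A self-contained sketch of that pattern; the SIGTERM flag, timings, and helper name are illustrative, and the real walsender naps with pg_usleep() inside its main loop rather than in a helper like this.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#define NAPTIME_PER_CYCLE 100000L	/* 100 ms per nap, as in walsender */

static volatile sig_atomic_t shutdown_requested = 0;

static void
handle_sigterm(int signo)
{
	shutdown_requested = 1;
}

/* Sleep roughly "total_us" microseconds, but notice SIGTERM promptly. */
static void
interruptible_sleep(long total_us)
{
	long		remain = total_us;	/* remaining time (us) */

	while (remain > 0 && !shutdown_requested)
	{
		long		nap = remain > NAPTIME_PER_CYCLE ? NAPTIME_PER_CYCLE : remain;

		usleep((useconds_t) nap);
		remain -= nap;
	}
}

int
main(void)
{
	signal(SIGTERM, handle_sigterm);
	interruptible_sleep(2000000L);	/* ~2 seconds, unless signaled */
	printf("done, shutdown_requested=%d\n", (int) shutdown_requested);
	return 0;
}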
* as a service.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/copydir.c,v 1.1 2010/07/02 17:03:30 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/copydir.c,v 1.2 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
struct stat fst;
- /* If we got a cancel signal during the copy of the directory, quit */
- CHECK_FOR_INTERRUPTS();
+ /* If we got a cancel signal during the copy of the directory, quit */
+ CHECK_FOR_INTERRUPTS();
if (strcmp(xlde->d_name, ".") == 0 ||
strcmp(xlde->d_name, "..") == 0)
*/
for (offset = 0;; offset += nbytes)
{
- /* If we got a cancel signal during the copy of the file, quit */
- CHECK_FOR_INTERRUPTS();
+ /* If we got a cancel signal during the copy of the file, quit */
+ CHECK_FOR_INTERRUPTS();
nbytes = read(srcfd, buffer, COPY_BUF_SIZE);
if (nbytes < 0)
fsync_fname(char *fname, bool isdir)
{
int fd;
- int returncode;
+ int returncode;
/*
- * Some OSs require directories to be opened read-only whereas
- * other systems don't allow us to fsync files opened read-only; so
- * we need both cases here
+ * Some OSs require directories to be opened read-only whereas other
+ * systems don't allow us to fsync files opened read-only; so we need both
+ * cases here
*/
if (!isdir)
fd = BasicOpenFile(fname,
S_IRUSR | S_IWUSR);
/*
- * Some OSs don't allow us to open directories at all
- * (Windows returns EACCES)
+ * Some OSs don't allow us to open directories at all (Windows returns
+ * EACCES)
*/
if (fd < 0 && isdir && (errno == EISDIR || errno == EACCES))
return;
errmsg("could not open file \"%s\": %m", fname)));
returncode = pg_fsync(fd);
-
+
/* Some OSs don't allow us to fsync directories at all */
if (returncode != 0 && isdir && errno == EBADF)
{
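The fsync_fname() hunks above encode three portability rules: open directories read-only but regular files read-write, tolerate EISDIR/EACCES when a platform refuses to open a directory, and ignore EBADF when fsync() of a directory is not supported. A standalone sketch of the same dance using plain open()/fsync(); the real routine goes through BasicOpenFile() and PostgreSQL's error reporting.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void
fsync_name(const char *fname, int isdir)
{
	int			fd;
	int			returncode;

	/* Some OSs require directories to be opened read-only, while others
	 * won't fsync a file opened read-only, so pick the mode accordingly. */
	if (!isdir)
		fd = open(fname, O_RDWR);
	else
		fd = open(fname, O_RDONLY);

	/* Some OSs don't let us open directories at all. */
	if (fd < 0 && isdir && (errno == EISDIR || errno == EACCES))
		return;
	else if (fd < 0)
	{
		fprintf(stderr, "could not open \"%s\"\n", fname);
		return;
	}

	returncode = fsync(fd);

	/* Some OSs don't allow us to fsync directories either. */
	if (returncode != 0 && !(isdir && errno == EBADF))
		fprintf(stderr, "could not fsync \"%s\"\n", fname);

	close(fd);
}

int
main(void)
{
	fsync_name(".", 1);
	return 0;
}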
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.107 2010/03/20 00:58:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.108 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
CritSectionCount = 0;
/*
- * Also clear the error context stack, to prevent error callbacks
- * from being invoked by any elog/ereport calls made during proc_exit.
- * Whatever context they might want to offer is probably not relevant,
- * and in any case they are likely to fail outright after we've done
- * things like aborting any open transaction. (In normal exit scenarios
- * the context stack should be empty anyway, but it might not be in the
- * case of elog(FATAL) for example.)
+ * Also clear the error context stack, to prevent error callbacks from
+ * being invoked by any elog/ereport calls made during proc_exit. Whatever
+ * context they might want to offer is probably not relevant, and in any
+ * case they are likely to fail outright after we've done things like
+ * aborting any open transaction. (In normal exit scenarios the context
+ * stack should be empty anyway, but it might not be in the case of
+ * elog(FATAL) for example.)
*/
error_context_stack = NULL;
/* For the same reason, reset debug_query_string before it's clobbered */
*
* During hot standby, we also keep a list of XIDs representing transactions
* that are known to be running in the master (or more precisely, were running
- * as of the current point in the WAL stream). This list is kept in the
+ * as of the current point in the WAL stream). This list is kept in the
* KnownAssignedXids array, and is updated by watching the sequence of
* arriving XIDs. This is necessary because if we leave those XIDs out of
* snapshots taken for standby queries, then they will appear to be already
- * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
+ * complete, leading to MVCC failures. Note that in hot standby, the PGPROC
* array represents standby processes, which by definition are not running
* transactions that have XIDs.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.71 2010/07/03 21:23:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.72 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
	int			numKnownAssignedXids;	/* current # of valid entries */
int tailKnownAssignedXids; /* index of oldest valid element */
int headKnownAssignedXids; /* index of newest element, + 1 */
- slock_t known_assigned_xids_lck; /* protects head/tail pointers */
+ slock_t known_assigned_xids_lck; /* protects head/tail pointers */
/*
* Highest subxid that has been removed from KnownAssignedXids array to
/* Primitives for KnownAssignedXids array handling for standby */
static void KnownAssignedXidsCompress(bool force);
static void KnownAssignedXidsAdd(TransactionId from_xid, TransactionId to_xid,
- bool exclusive_lock);
+ bool exclusive_lock);
static bool KnownAssignedXidsSearch(TransactionId xid, bool remove);
static bool KnownAssignedXidExists(TransactionId xid);
static void KnownAssignedXidsRemove(TransactionId xid);
static void KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids,
- TransactionId *subxids);
+ TransactionId *subxids);
static void KnownAssignedXidsRemovePreceding(TransactionId xid);
-static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
+static int KnownAssignedXidsGet(TransactionId *xarray, TransactionId xmax);
static int KnownAssignedXidsGetAndSetXmin(TransactionId *xarray,
- TransactionId *xmin,
- TransactionId xmax);
+ TransactionId *xmin,
+ TransactionId xmax);
static void KnownAssignedXidsDisplay(int trace_level);
/*
* since we may at times copy the whole of the data structures around. We
* refer to this size as TOTAL_MAX_CACHED_SUBXIDS.
*
- * Ideally we'd only create this structure if we were actually doing
- * hot standby in the current run, but we don't know that yet at the
- * time shared memory is being set up.
+ * Ideally we'd only create this structure if we were actually doing hot
+ * standby in the current run, but we don't know that yet at the time
+ * shared memory is being set up.
*/
#define TOTAL_MAX_CACHED_SUBXIDS \
((PGPROC_MAX_CACHED_SUBXIDS + 1) * PROCARRAY_MAXPROCS)
ProcArrayApplyRecoveryInfo(RunningTransactions running)
{
TransactionId *xids;
- int nxids;
+ int nxids;
TransactionId nextXid;
- int i;
+ int i;
Assert(standbyState >= STANDBY_INITIALIZED);
Assert(TransactionIdIsValid(running->nextXid));
else
elog(trace_recovery(DEBUG2),
"recovery snapshot waiting for %u oldest active xid on standby is %u",
- standbySnapshotPendingXmin,
- running->oldestRunningXid);
+ standbySnapshotPendingXmin,
+ running->oldestRunningXid);
return;
}
/*
* Remove all xids except xids later than the snapshot. We don't know
- * exactly which ones that is until precisely now, so that is why we
- * allow xids to be added only to remove most of them again here.
+	 * exactly which ones those are until precisely now, which is why we allow
+	 * xids to be added only to remove most of them again here.
*/
ExpireOldKnownAssignedTransactionIds(running->nextXid);
StandbyReleaseOldLocks(running->nextXid);
/*
* Combine the running xact data with already known xids, if any exist.
- * KnownAssignedXids is sorted so we cannot just add new xids, we have
- * to combine them first, sort them and then re-add to KnownAssignedXids.
+ * KnownAssignedXids is sorted so we cannot just add new xids, we have to
+ * combine them first, sort them and then re-add to KnownAssignedXids.
*
- * Some of the new xids are top-level xids and some are subtransactions. We
- * don't call SubtransSetParent because it doesn't matter yet. If we aren't
- * overflowed then all xids will fit in snapshot and so we don't need
- * subtrans. If we later overflow, an xid assignment record will add xids
- * to subtrans. If RunningXacts is overflowed then we don't have enough
- * information to correctly update subtrans anyway.
+ * Some of the new xids are top-level xids and some are subtransactions.
+ * We don't call SubtransSetParent because it doesn't matter yet. If we
+ * aren't overflowed then all xids will fit in snapshot and so we don't
+ * need subtrans. If we later overflow, an xid assignment record will add
+ * xids to subtrans. If RunningXacts is overflowed then we don't have
+ * enough information to correctly update subtrans anyway.
*/
/*
- * Allocate a temporary array so we can combine xids. The total
- * of both arrays should never normally exceed TOTAL_MAX_CACHED_SUBXIDS.
+ * Allocate a temporary array so we can combine xids. The total of both
+ * arrays should never normally exceed TOTAL_MAX_CACHED_SUBXIDS.
*/
xids = palloc(sizeof(TransactionId) * TOTAL_MAX_CACHED_SUBXIDS);
/*
- * Get the remaining KnownAssignedXids. In most cases there won't
- * be any at all since this exists only to catch a theoretical
- * race condition.
+ * Get the remaining KnownAssignedXids. In most cases there won't be any
+ * at all since this exists only to catch a theoretical race condition.
*/
nxids = KnownAssignedXidsGet(xids, InvalidTransactionId);
if (nxids > 0)
KnownAssignedXidsDisplay(trace_recovery(DEBUG3));
/*
- * Now we have a copy of any KnownAssignedXids we can zero the
- * array before we re-insertion of combined snapshot.
+	 * Now that we have a copy of any KnownAssignedXids, we can zero the array
+	 * before re-inserting the combined snapshot.
*/
KnownAssignedXidsRemovePreceding(InvalidTransactionId);
/*
- * Add to the temp array any xids which have not already completed,
- * taking care not to overflow in extreme cases.
+ * Add to the temp array any xids which have not already completed, taking
+ * care not to overflow in extreme cases.
*/
for (i = 0; i < running->xcnt; i++)
{
if (nxids > 0)
{
/*
- * Sort the array so that we can add them safely into KnownAssignedXids.
+ * Sort the array so that we can add them safely into
+ * KnownAssignedXids.
*/
qsort(xids, nxids, sizeof(TransactionId), xidComparator);
pfree(xids);
/*
- * Now we've got the running xids we need to set the global values
- * thare used to track snapshots as they evolve further
+	 * Now that we've got the running xids, we need to set the global values
+	 * that are used to track snapshots as they evolve further:
*
- * * latestCompletedXid which will be the xmax for snapshots
- * * lastOverflowedXid which shows whether snapshots overflow
- * * nextXid
+	 *	* latestCompletedXid which will be the xmax for snapshots
+	 *	* lastOverflowedXid which shows whether snapshots overflow
+	 *	* nextXid
*
* If the snapshot overflowed, then we still initialise with what we know,
* but the recovery snapshot isn't fully valid yet because we know there
- * are some subxids missing.
- * We don't know the specific subxids that are missing, so conservatively
- * assume the last one is latestObservedXid. If no missing subxids,
- * try to clear lastOverflowedXid.
+ * are some subxids missing. We don't know the specific subxids that are
+ * missing, so conservatively assume the last one is latestObservedXid.
+ * If no missing subxids, try to clear lastOverflowedXid.
*
* If the snapshot didn't overflow it's still possible that an overflow
- * occurred in the gap between taking snapshot and logging record, so
- * we also need to check if lastOverflowedXid is already ahead of us.
+ * occurred in the gap between taking snapshot and logging record, so we
+ * also need to check if lastOverflowedXid is already ahead of us.
*/
if (running->subxid_overflow)
{
procArray->lastOverflowedXid = latestObservedXid;
}
else if (TransactionIdFollows(procArray->lastOverflowedXid,
- latestObservedXid))
+ latestObservedXid))
{
standbyState = STANDBY_SNAPSHOT_PENDING;
standbySnapshotPendingXmin = InvalidTransactionId;
if (TransactionIdFollows(running->oldestRunningXid,
- procArray->lastOverflowedXid))
+ procArray->lastOverflowedXid))
procArray->lastOverflowedXid = InvalidTransactionId;
}
/*
* If the KnownAssignedXids overflowed, we have to check pg_subtrans
- * too. Fetch all xids from KnownAssignedXids that are lower than xid,
- * since if xid is a subtransaction its parent will always have a
- * lower value. Note we will collect both main and subXIDs here,
- * but there's no help for it.
+ * too. Fetch all xids from KnownAssignedXids that are lower than
+ * xid, since if xid is a subtransaction its parent will always have a
+ * lower value. Note we will collect both main and subXIDs here, but
+ * there's no help for it.
*/
if (TransactionIdPrecedesOrEquals(xid, procArray->lastOverflowedXid))
nxids = KnownAssignedXidsGet(xids, xid);
LWLockRelease(ProcArrayLock);
/*
- * Compute the cutoff XID, being careful not to generate a "permanent" XID.
+ * Compute the cutoff XID, being careful not to generate a "permanent"
+ * XID.
*
* vacuum_defer_cleanup_age provides some additional "slop" for the
* benefit of hot standby queries on slave servers. This is quick and
* dirty, and perhaps not all that useful unless the master has a
- * predictable transaction rate, but it's what we've got. Note that
- * we are assuming vacuum_defer_cleanup_age isn't large enough to cause
- * wraparound --- so guc.c should limit it to no more than the xidStopLimit
- * threshold in varsup.c.
+ * predictable transaction rate, but it's what we've got. Note that we
+ * are assuming vacuum_defer_cleanup_age isn't large enough to cause
+ * wraparound --- so guc.c should limit it to no more than the
+ * xidStopLimit threshold in varsup.c.
*/
result -= vacuum_defer_cleanup_age;
if (!TransactionIdIsNormal(result))
/*
* If we're in recovery then snapshot data comes from a different place,
- * so decide which route we take before grab the lock. It is possible
- * for recovery to end before we finish taking snapshot, and for newly
+	 * so decide which route we take before grabbing the lock. It is possible
+	 * for recovery to end before we finish taking the snapshot, and for newly
* assigned transaction ids to be added to the procarray. Xmax cannot
* change while we hold ProcArrayLock, so those newly added transaction
* ids would be filtered away, so we need not be concerned about them.
if (!snapshot->takenDuringRecovery)
{
/*
- * Spin over procArray checking xid, xmin, and subxids. The goal is to
- * gather all active xids, find the lowest xmin, and try to record
+ * Spin over procArray checking xid, xmin, and subxids. The goal is
+ * to gather all active xids, find the lowest xmin, and try to record
* subxids. During recovery no xids will be assigned, so all normal
* backends can be ignored, nor are there any VACUUMs running. All
* prepared transaction xids are held in KnownAssignedXids, so these
continue;
/* Update globalxmin to be the smallest valid xmin */
- xid = proc->xmin; /* fetch just once */
+ xid = proc->xmin; /* fetch just once */
if (TransactionIdIsNormal(xid) &&
TransactionIdPrecedes(xid, globalxmin))
globalxmin = xid;
xid = proc->xid;
/*
- * If the transaction has been assigned an xid < xmax we add it to the
- * snapshot, and update xmin if necessary. There's no need to store
- * XIDs >= xmax, since we'll treat them as running anyway. We don't
- * bother to examine their subxids either.
+ * If the transaction has been assigned an xid < xmax we add it to
+ * the snapshot, and update xmin if necessary. There's no need to
+ * store XIDs >= xmax, since we'll treat them as running anyway.
+ * We don't bother to examine their subxids either.
*
- * We don't include our own XID (if any) in the snapshot, but we must
- * include it into xmin.
+ * We don't include our own XID (if any) in the snapshot, but we
+ * must include it into xmin.
*/
if (TransactionIdIsNormal(xid))
{
}
/*
- * Save subtransaction XIDs if possible (if we've already overflowed,
- * there's no point). Note that the subxact XIDs must be later than
- * their parent, so no need to check them against xmin. We could
- * filter against xmax, but it seems better not to do that much work
- * while holding the ProcArrayLock.
+ * Save subtransaction XIDs if possible (if we've already
+ * overflowed, there's no point). Note that the subxact XIDs must
+ * be later than their parent, so no need to check them against
+ * xmin. We could filter against xmax, but it seems better not to
+ * do that much work while holding the ProcArrayLock.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once. Should
- * be safe to use memcpy, though. (We needn't worry about missing any
- * xids added concurrently, because they must postdate xmax.)
+ * remove any. Hence it's important to fetch nxids just once.
+ * Should be safe to use memcpy, though. (We needn't worry about
+ * missing any xids added concurrently, because they must postdate
+ * xmax.)
*
* Again, our own XIDs are not included in the snapshot.
*/
* us then the conflict assessment made here would never include the snapshot
* that is being derived. So we take LW_SHARED on the ProcArray and allow
* concurrent snapshots when limitXmin is valid. We might think about adding
- * Assert(limitXmin < lowest(KnownAssignedXids))
+ * Assert(limitXmin < lowest(KnownAssignedXids))
* but that would not be true in the case of FATAL errors lagging in array,
* but we already know those are bogus anyway, so we skip that test.
*
* treated as running by standby transactions, even though they are not in
* the standby server's PGPROC array.
*
- * We record all XIDs that we know have been assigned. That includes all the
+ * We record all XIDs that we know have been assigned. That includes all the
* XIDs seen in WAL records, plus all unobserved XIDs that we can deduce have
* been assigned. We can deduce the existence of unobserved XIDs because we
* know XIDs are assigned in sequence, with no gaps. The KnownAssignedXids
*
* During hot standby we do not fret too much about the distinction between
* top-level XIDs and subtransaction XIDs. We store both together in the
- * KnownAssignedXids list. In backends, this is copied into snapshots in
+ * KnownAssignedXids list. In backends, this is copied into snapshots in
* GetSnapshotData(), taking advantage of the fact that XidInMVCCSnapshot()
* doesn't care about the distinction either. Subtransaction XIDs are
* effectively treated as top-level XIDs and in the typical case pg_subtrans
Assert(TransactionIdIsValid(xid));
elog(trace_recovery(DEBUG4), "record known xact %u latestObservedXid %u",
- xid, latestObservedXid);
+ xid, latestObservedXid);
/*
* When a newly observed xid arrives, it is frequently the case that it is
TransactionId next_expected_xid;
/*
- * Extend clog and subtrans like we do in GetNewTransactionId()
- * during normal operation using individual extend steps.
- * Typical case requires almost no activity.
+ * Extend clog and subtrans like we do in GetNewTransactionId() during
+ * normal operation using individual extend steps. Typical case
+ * requires almost no activity.
*/
next_expected_xid = latestObservedXid;
TransactionIdAdvance(next_expected_xid);
*/
void
ExpireTreeKnownAssignedTransactionIds(TransactionId xid, int nsubxids,
- TransactionId *subxids, TransactionId max_xid)
+ TransactionId *subxids, TransactionId max_xid)
{
Assert(standbyState >= STANDBY_INITIALIZED);
* must hold shared ProcArrayLock to examine the array. To remove XIDs from
* the array, the startup process must hold ProcArrayLock exclusively, for
* the usual transactional reasons (compare commit/abort of a transaction
- * during normal running). Compressing unused entries out of the array
+ * during normal running). Compressing unused entries out of the array
* likewise requires exclusive lock. To add XIDs to the array, we just insert
* them into slots to the right of the head pointer and then advance the head
* pointer. This wouldn't require any lock at all, except that on machines
* with weak memory ordering we need to be careful that other processors
* see the array element changes before they see the head pointer change.
* We handle this by using a spinlock to protect reads and writes of the
- * head/tail pointers. (We could dispense with the spinlock if we were to
+ * head/tail pointers. (We could dispense with the spinlock if we were to
* create suitable memory access barrier primitives and use those instead.)
* The spinlock must be taken to read or write the head/tail pointers unless
* the caller holds ProcArrayLock exclusively.
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
- int head, tail;
- int compress_index;
- int i;
+ int head,
+ tail;
+ int compress_index;
+ int i;
/* no spinlock required since we hold ProcArrayLock exclusively */
head = pArray->headKnownAssignedXids;
if (!force)
{
/*
- * If we can choose how much to compress, use a heuristic to
- * avoid compressing too often or not often enough.
+ * If we can choose how much to compress, use a heuristic to avoid
+ * compressing too often or not often enough.
*
- * Heuristic is if we have a large enough current spread and
- * less than 50% of the elements are currently in use, then
- * compress. This should ensure we compress fairly infrequently.
- * We could compress less often though the virtual array would
- * spread out more and snapshots would become more expensive.
+		 * The heuristic is: if we have a large enough current spread and less than
+ * 50% of the elements are currently in use, then compress. This
+ * should ensure we compress fairly infrequently. We could compress
+ * less often though the virtual array would spread out more and
+ * snapshots would become more expensive.
*/
- int nelements = head - tail;
+ int nelements = head - tail;
if (nelements < 4 * PROCARRAY_MAXPROCS ||
nelements < 2 * pArray->numKnownAssignedXids)
}
/*
- * We compress the array by reading the valid values from tail
- * to head, re-aligning data to 0th element.
+ * We compress the array by reading the valid values from tail to head,
+ * re-aligning data to 0th element.
*/
compress_index = 0;
for (i = tail; i < head; i++)
* If exclusive_lock is true then caller already holds ProcArrayLock in
* exclusive mode, so we need no extra locking here. Else caller holds no
* lock, so we need to be sure we maintain sufficient interlocks against
- * concurrent readers. (Only the startup process ever calls this, so no need
+ * concurrent readers. (Only the startup process ever calls this, so no need
* to worry about concurrent writers.)
*/
static void
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
- TransactionId next_xid;
- int head, tail;
+ TransactionId next_xid;
+ int head,
+ tail;
int nxids;
int i;
Assert(TransactionIdPrecedesOrEquals(from_xid, to_xid));
/*
- * Calculate how many array slots we'll need. Normally this is cheap;
- * in the unusual case where the XIDs cross the wrap point, we do it the
- * hard way.
+ * Calculate how many array slots we'll need. Normally this is cheap; in
+ * the unusual case where the XIDs cross the wrap point, we do it the hard
+ * way.
*/
if (to_xid >= from_xid)
nxids = to_xid - from_xid + 1;
}
/*
- * Since only the startup process modifies the head/tail pointers,
- * we don't need a lock to read them here.
+ * Since only the startup process modifies the head/tail pointers, we
+ * don't need a lock to read them here.
*/
head = pArray->headKnownAssignedXids;
tail = pArray->tailKnownAssignedXids;
Assert(tail >= 0 && tail < pArray->maxKnownAssignedXids);
/*
- * Verify that insertions occur in TransactionId sequence. Note that
- * even if the last existing element is marked invalid, it must still
- * have a correctly sequenced XID value.
+ * Verify that insertions occur in TransactionId sequence. Note that even
+ * if the last existing element is marked invalid, it must still have a
+ * correctly sequenced XID value.
*/
if (head > tail &&
TransactionIdFollowsOrEquals(KnownAssignedXids[head - 1], from_xid))
* ensure that other processors see the above array updates before they
* see the head pointer change.
*
- * If we're holding ProcArrayLock exclusively, there's no need to take
- * the spinlock.
+ * If we're holding ProcArrayLock exclusively, there's no need to take the
+ * spinlock.
*/
if (exclusive_lock)
pArray->headKnownAssignedXids = head;
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
- int first, last;
- int head;
- int tail;
- int result_index = -1;
+ int first,
+ last;
+ int head;
+ int tail;
+ int result_index = -1;
if (remove)
{
}
/*
- * Standard binary search. Note we can ignore the KnownAssignedXidsValid
+ * Standard binary search. Note we can ignore the KnownAssignedXidsValid
* array here, since even invalid entries will contain sorted XIDs.
*/
first = tail;
last = head - 1;
while (first <= last)
{
- int mid_index;
- TransactionId mid_xid;
+ int mid_index;
+ TransactionId mid_xid;
mid_index = (first + last) / 2;
mid_xid = KnownAssignedXids[mid_index];
/*
* Note: we cannot consider it an error to remove an XID that's not
* present. We intentionally remove subxact IDs while processing
- * XLOG_XACT_ASSIGNMENT, to avoid array overflow. Then those XIDs
- * will be removed again when the top-level xact commits or aborts.
+ * XLOG_XACT_ASSIGNMENT, to avoid array overflow. Then those XIDs will be
+ * removed again when the top-level xact commits or aborts.
*
- * It might be possible to track such XIDs to distinguish this case
- * from actual errors, but it would be complicated and probably not
- * worth it. So, just ignore the search result.
+ * It might be possible to track such XIDs to distinguish this case from
+ * actual errors, but it would be complicated and probably not worth it.
+ * So, just ignore the search result.
*/
(void) KnownAssignedXidsSearch(xid, true);
}
KnownAssignedXidsRemoveTree(TransactionId xid, int nsubxids,
TransactionId *subxids)
{
- int i;
+ int i;
if (TransactionIdIsValid(xid))
KnownAssignedXidsRemove(xid);
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
- int count = 0;
- int head, tail, i;
+ int count = 0;
+ int head,
+ tail,
+ i;
if (!TransactionIdIsValid(removeXid))
{
elog(trace_recovery(DEBUG4), "prune KnownAssignedXids to %u", removeXid);
/*
- * Mark entries invalid starting at the tail. Since array is sorted,
- * we can stop as soon as we reach a entry >= removeXid.
+	 * Mark entries invalid starting at the tail. Since the array is sorted, we
+	 * can stop as soon as we reach an entry >= removeXid.
*/
tail = pArray->tailKnownAssignedXids;
head = pArray->headKnownAssignedXids;
{
if (KnownAssignedXidsValid[i])
{
- TransactionId knownXid = KnownAssignedXids[i];
+ TransactionId knownXid = KnownAssignedXids[i];
if (TransactionIdFollowsOrEquals(knownXid, removeXid))
break;
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
int count = 0;
- int head, tail;
+ int head,
+ tail;
int i;
/*
- * Fetch head just once, since it may change while we loop.
- * We can stop once we reach the initially seen head, since
- * we are certain that an xid cannot enter and then leave the
- * array while we hold ProcArrayLock. We might miss newly-added
- * xids, but they should be >= xmax so irrelevant anyway.
+ * Fetch head just once, since it may change while we loop. We can stop
+ * once we reach the initially seen head, since we are certain that an xid
+ * cannot enter and then leave the array while we hold ProcArrayLock. We
+ * might miss newly-added xids, but they should be >= xmax so irrelevant
+ * anyway.
*
* Must take spinlock to ensure we see up-to-date array contents.
*/
{
/* use volatile pointer to prevent code rearrangement */
volatile ProcArrayStruct *pArray = procArray;
- StringInfoData buf;
- int head, tail, i;
- int nxids = 0;
+ StringInfoData buf;
+ int head,
+ tail,
+ i;
+ int nxids = 0;
tail = pArray->tailKnownAssignedXids;
head = pArray->headKnownAssignedXids;
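The KnownAssignedXids hunks above combine several ideas: a sorted array bounded by head/tail indexes, a parallel validity array so that removal is just marking a slot invalid, a binary search that can ignore the validity flags because even invalid slots still hold sorted XIDs, and an occasional compression pass that re-packs valid entries to the front. The toy model below shows those pieces fitting together; all names, sizes, and the absence of locking are simplifications, not the real procarray.c code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_XIDS 16

typedef uint32_t TransactionId;

static TransactionId xids[MAX_XIDS];
static bool xids_valid[MAX_XIDS];
static int	tail = 0,
			head = 0;			/* valid entries live in [tail, head) */

/* Append xids in increasing order at the head (no overflow handling here). */
static void
xid_add(TransactionId xid)
{
	if (head >= MAX_XIDS)
		return;
	xids[head] = xid;
	xids_valid[head] = true;
	head++;
}

/* Binary search over [tail, head); invalid entries still hold sorted XIDs. */
static int
xid_search(TransactionId xid)
{
	int			first = tail,
				last = head - 1;

	while (first <= last)
	{
		int			mid = (first + last) / 2;

		if (xids[mid] == xid)
			return xids_valid[mid] ? mid : -1;
		if (xids[mid] < xid)
			first = mid + 1;
		else
			last = mid - 1;
	}
	return -1;
}

/* Lazy removal: just mark the slot invalid. */
static void
xid_remove(TransactionId xid)
{
	int			i = xid_search(xid);

	if (i >= 0)
		xids_valid[i] = false;
}

/* Compact valid entries back to index 0, like the compression pass above. */
static void
xid_compress(void)
{
	int			j = 0;

	for (int i = tail; i < head; i++)
		if (xids_valid[i])
		{
			xids[j] = xids[i];
			xids_valid[j] = true;
			j++;
		}
	tail = 0;
	head = j;
}

int
main(void)
{
	for (TransactionId x = 100; x < 110; x++)
		xid_add(x);
	xid_remove(103);
	xid_compress();
	printf("105 at index %d after compress\n", xid_search(105));
	return 0;
}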
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.104 2010/04/28 16:54:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.105 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* already in the shmem index (hence, already initialized).
*
* Note: before Postgres 9.0, this function returned NULL for some failure
- * cases. Now, it always throws error instead, so callers need not check
+ * cases. Now, it always throws error instead, so callers need not check
* for NULL.
*/
void *
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("could not create ShmemIndex entry for data structure \"%s\"",
- name)));
+ errmsg("could not create ShmemIndex entry for data structure \"%s\"",
+ name)));
}
if (*foundPtr)
{
LWLockRelease(ShmemIndexLock);
ereport(ERROR,
- (errmsg("ShmemIndex entry size is wrong for data structure"
- " \"%s\": expected %lu, actual %lu",
- name,
- (unsigned long) size,
- (unsigned long) result->size)));
+ (errmsg("ShmemIndex entry size is wrong for data structure"
+ " \"%s\": expected %lu, actual %lu",
+ name,
+ (unsigned long) size,
+ (unsigned long) result->size)));
}
structPtr = result->location;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.26 2010/07/03 20:43:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/standby.c,v 1.27 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static TimestampTz
GetStandbyLimitTime(void)
{
- TimestampTz rtime;
+ TimestampTz rtime;
bool fromStream;
/*
* The cutoff time is the last WAL data receipt time plus the appropriate
- * delay variable. Delay of -1 means wait forever.
+ * delay variable. Delay of -1 means wait forever.
*/
GetXLogReceiptTime(&rtime, &fromStream);
if (fromStream)
static bool
WaitExceedsMaxStandbyDelay(void)
{
- TimestampTz ltime;
+ TimestampTz ltime;
/* Are we past the limit time? */
ltime = GetStandbyLimitTime();
pg_usleep(standbyWait_us);
/*
- * Progressively increase the sleep times, but not to more than 1s,
- * since pg_usleep isn't interruptable on some platforms.
+ * Progressively increase the sleep times, but not to more than 1s, since
+	 * pg_usleep isn't interruptible on some platforms.
*/
standbyWait_us *= 2;
if (standbyWait_us > 1000000)
ResolveRecoveryConflictWithBufferPin(void)
{
bool sig_alarm_enabled = false;
- TimestampTz ltime;
- TimestampTz now;
+ TimestampTz ltime;
+ TimestampTz now;
Assert(InHotStandby);
* up from a checkpoint and are immediately at our starting point, we
* unconditionally move to STANDBY_INITIALIZED. After this point we
* must do 4 things:
- * * move shared nextXid forwards as we see new xids
- * * extend the clog and subtrans with each new xid
- * * keep track of uncommitted known assigned xids
- * * keep track of uncommitted AccessExclusiveLocks
+ * * move shared nextXid forwards as we see new xids
+ * * extend the clog and subtrans with each new xid
+ * * keep track of uncommitted known assigned xids
+ * * keep track of uncommitted AccessExclusiveLocks
*
* When we see a commit/abort we must remove known assigned xids and locks
* from the completing transaction. Attempted removals that cannot locate
/*
* Get details of any AccessExclusiveLocks being held at the moment.
*
- * XXX GetRunningTransactionLocks() currently holds a lock on all partitions
- * though it is possible to further optimise the locking. By reference
- * counting locks and storing the value on the ProcArray entry for each backend
- * we can easily tell if any locks need recording without trying to acquire
- * the partition locks and scanning the lock table.
+ * XXX GetRunningTransactionLocks() currently holds a lock on all
+ * partitions though it is possible to further optimise the locking. By
+ * reference counting locks and storing the value on the ProcArray entry
+ * for each backend we can easily tell if any locks need recording without
+ * trying to acquire the partition locks and scanning the lock table.
*/
locks = GetRunningTransactionLocks(&nlocks);
if (nlocks > 0)
* record we write, because standby will open up when it sees this.
*/
running = GetRunningTransactionData();
+
/*
- * The gap between GetRunningTransactionData() and LogCurrentRunningXacts()
- * is what most of the fuss is about here, so artifically extending this
- * interval is a great way to test the little used parts of the code.
+ * The gap between GetRunningTransactionData() and
+ * LogCurrentRunningXacts() is what most of the fuss is about here, so
+	 * artificially extending this interval is a great way to test the little
+ * used parts of the code.
*/
LogCurrentRunningXacts(running);
if (CurrRunningXacts->subxid_overflow)
elog(trace_recovery(DEBUG2),
- "snapshot of %u running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
- CurrRunningXacts->xcnt,
- recptr.xlogid, recptr.xrecoff,
- CurrRunningXacts->oldestRunningXid,
- CurrRunningXacts->latestCompletedXid,
- CurrRunningXacts->nextXid);
+ "snapshot of %u running transactions overflowed (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ CurrRunningXacts->xcnt,
+ recptr.xlogid, recptr.xrecoff,
+ CurrRunningXacts->oldestRunningXid,
+ CurrRunningXacts->latestCompletedXid,
+ CurrRunningXacts->nextXid);
else
elog(trace_recovery(DEBUG2),
- "snapshot of %u running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
- CurrRunningXacts->xcnt,
- recptr.xlogid, recptr.xrecoff,
- CurrRunningXacts->oldestRunningXid,
- CurrRunningXacts->latestCompletedXid,
- CurrRunningXacts->nextXid);
+ "snapshot of %u running transaction ids (lsn %X/%X oldest xid %u latest complete %u next xid %u)",
+ CurrRunningXacts->xcnt,
+ recptr.xlogid, recptr.xrecoff,
+ CurrRunningXacts->oldestRunningXid,
+ CurrRunningXacts->latestCompletedXid,
+ CurrRunningXacts->nextXid);
}
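The standby.c hunks earlier in this excerpt (GetStandbyLimitTime() and the pg_usleep() loop) describe waiting with exponentially growing naps capped at one second, against a limit time where a delay of -1 means wait forever. A simplified standalone sketch of that loop; the deadline handling, condition callback, and names are placeholders.

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Poll "done" with exponential backoff until it succeeds or the deadline
 * passes.  A deadline of (time_t) -1 means wait forever. */
static int
wait_with_backoff(time_t deadline, int (*done) (void))
{
	long		wait_us = 1000;	/* start with 1 ms, as in standby.c */

	while (!done())
	{
		if (deadline != (time_t) -1 && time(NULL) >= deadline)
			return 0;			/* limit reached: caller must act */

		usleep((useconds_t) wait_us);

		/* Progressively increase the sleep time, but cap it at 1 s since
		 * the sleep may not be interruptible everywhere. */
		wait_us *= 2;
		if (wait_us > 1000000)
			wait_us = 1000000;
	}
	return 1;
}

static int
never_done(void)
{
	return 0;
}

int
main(void)
{
	/* A 3-second budget; -1 would mean "wait forever" as in the comments. */
	int			ok = wait_with_backoff(time(NULL) + 3, never_done);

	printf("condition satisfied: %d\n", ok);
	return 0;
}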
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.220 2010/07/03 20:43:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.221 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
long secs;
int usecs;
struct itimerval timeval;
+
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
if (secs == 0 && usecs == 0)
CheckStandbyTimeout(void)
{
TimestampTz now;
- bool reschedule = false;
+ bool reschedule = false;
standby_timeout_active = false;
now = GetCurrentTimestamp();
/*
- * Reschedule the timer if its not time to wake yet, or if we
- * have both timers set and the first one has just been reached.
+	 * Reschedule the timer if it's not time to wake yet, or if we have both
+ * timers set and the first one has just been reached.
*/
if (now >= statement_fin_time)
{
{
/*
* We're still waiting when we reach deadlock timeout, so send out
- * a request to have other backends check themselves for
- * deadlock. Then continue waiting until statement_fin_time,
- * if that's set.
+ * a request to have other backends check themselves for deadlock.
+ * Then continue waiting until statement_fin_time, if that's set.
*/
SendRecoveryConflictWithBufferPin(PROCSIG_RECOVERY_CONFLICT_STARTUP_DEADLOCK);
deadlock_timeout_active = false;
long secs;
int usecs;
struct itimerval timeval;
+
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
if (secs == 0 && usecs == 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.104 2010/06/30 18:10:23 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.105 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
if ((fid == F_PG_GET_EXPR || fid == F_PG_GET_EXPR_EXT) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("argument to pg_get_expr() must come from system catalogs")));
+ errmsg("argument to pg_get_expr() must come from system catalogs")));
/*
* Prepare function call info block and insert arguments.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.594 2010/05/12 19:45:02 sriggs Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.595 2010/07/06 19:18:57 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
/*
* All conflicts apart from database cause dynamic errors where the
* command or transaction can be retried at a later point with some
- * potential for success. No need to reset this, since
- * non-retryable conflict errors are currently FATAL.
+ * potential for success. No need to reset this, since non-retryable
+ * conflict errors are currently FATAL.
*/
if (reason == PROCSIG_RECOVERY_CONFLICT_DATABASE)
RecoveryConflictRetryable = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tsearch/ts_typanalyze.c,v 1.9 2010/05/30 21:59:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tsearch/ts_typanalyze.c,v 1.10 2010/07/06 19:18:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* language's frequency table, where K is the target number of entries in
* the MCELEM array plus an arbitrary constant, meant to reflect the fact
* that the most common words in any language would usually be stopwords
- * so we will not actually see them in the input. We assume that the
+ * so we will not actually see them in the input. We assume that the
* distribution of word frequencies (including the stopwords) follows Zipf's
* law with an exponent of 1.
*
* Assuming Zipfian distribution, the frequency of the K'th word is equal
* to 1/(K * H(W)) where H(n) is 1/2 + 1/3 + ... + 1/n and W is the number of
- * words in the language. Putting W as one million, we get roughly 0.07/K.
+ * words in the language. Putting W as one million, we get roughly 0.07/K.
* Assuming top 10 words are stopwords gives s = 0.07/(K + 10). We set
* epsilon = s/10, which gives bucket width w = (K + 10)/0.007 and
* maximum expected hashtable size of about 1000 * (K + 10).
TrackItem *item;
/*
- * We want statistics_target * 10 lexemes in the MCELEM array. This
+ * We want statistics_target * 10 lexemes in the MCELEM array. This
* multiplier is pretty arbitrary, but is meant to reflect the fact that
- * the number of individual lexeme values tracked in pg_statistic ought
- * to be more than the number of values for a simple scalar column.
+ * the number of individual lexeme values tracked in pg_statistic ought to
+ * be more than the number of values for a simple scalar column.
*/
num_mcelem = stats->attr->attstattarget * 10;
/*
* Construct an array of the interesting hashtable items, that is,
- * those meeting the cutoff frequency (s - epsilon)*N. Also identify
+ * those meeting the cutoff frequency (s - epsilon)*N. Also identify
* the minimum and maximum frequencies among these items.
*
* Since epsilon = s/10 and bucket_width = 1/epsilon, the cutoff
*/
cutoff_freq = 9 * lexeme_no / bucket_width;
- i = hash_get_num_entries(lexemes_tab); /* surely enough space */
+ i = hash_get_num_entries(lexemes_tab); /* surely enough space */
sort_table = (TrackItem **) palloc(sizeof(TrackItem *) * i);
hash_seq_init(&scan_status, lexemes_tab);
num_mcelem, bucket_width, lexeme_no, i, track_len);
/*
- * If we obtained more lexemes than we really want, get rid of
- * those with least frequencies. The easiest way is to qsort the
- * array into descending frequency order and truncate the array.
+ * If we obtained more lexemes than we really want, get rid of those
+ * with least frequencies. The easiest way is to qsort the array into
+ * descending frequency order and truncate the array.
*/
if (num_mcelem < track_len)
{
mcelem_freqs = (float4 *) palloc((num_mcelem + 2) * sizeof(float4));
/*
- * See comments above about use of nonnull_cnt as the divisor
- * for the final frequency estimates.
+ * See comments above about use of nonnull_cnt as the divisor for
+ * the final frequency estimates.
*/
for (i = 0; i < num_mcelem; i++)
{
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.170 2010/04/07 21:41:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.171 2010/07/06 19:18:58 momjian Exp $
*
*
* Portions Copyright (c) 1999-2010, PostgreSQL Global Development Group
s += SKIP_THth(n->suffix);
break;
case DCH_Q:
+
/*
- * We ignore 'Q' when converting to date because it is
- * unclear which date in the quarter to use, and some
- * people specify both quarter and month, so if it was
- * honored it might conflict with the supplied month.
- * That is also why we don't throw an error.
+ * We ignore 'Q' when converting to date because it is unclear
+ * which date in the quarter to use, and some people specify
+ * both quarter and month, so if it was honored it might
+ * conflict with the supplied month. That is also why we don't
+ * throw an error.
*
* We still parse the source string for an integer, but it
* isn't stored anywhere in 'out'.
* Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.29 2010/05/28 18:18:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like_match.c,v 1.30 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* If there are wildcards immediately following the %, we can skip
* over them first, using the idea that any sequence of N _'s and
* one or more %'s is equivalent to N _'s and one % (ie, it will
- * match any sequence of at least N text characters). In this
- * way we will always run the recursive search loop using a
- * pattern fragment that begins with a literal character-to-match,
- * thereby not recursing more than we have to.
+ * match any sequence of at least N text characters). In this way
+ * we will always run the recursive search loop using a pattern
+ * fragment that begins with a literal character-to-match, thereby
+ * not recursing more than we have to.
*/
NextByte(p, plen);
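To make the collapsing rule above concrete, here is a minimal ASCII-only sketch, separate from the real matching code (which walks the pattern with NextByte/NextChar and handles multibyte encodings): it scans the run of wildcards that follows a '%' and reports how many mandatory single-character matches that run implies.

/*
 * Sketch only: p points just past a '%'.  Each '_' in the following run
 * still costs one character of text; additional '%'s add nothing, so
 * "N underscores plus any number of percents" collapses to "at least N
 * characters, then one %".
 */
static const char *
collapse_wildcards(const char *p, int *min_chars)
{
	*min_chars = 0;
	while (*p == '_' || *p == '%')
	{
		if (*p == '_')
			(*min_chars)++;
		p++;
	}
	return p;					/* first literal pattern character, or '\0' */
}

For the fragment "__%%a", for example, this reports two mandatory characters and stops at the 'a', so the recursive search always restarts at a literal character to match.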
int matched = MatchText(t, tlen, p, plen);
if (matched != LIKE_FALSE)
- return matched; /* TRUE or ABORT */
+ return matched; /* TRUE or ABORT */
}
NextChar(t, tlen);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.77 2010/06/13 17:43:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.78 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case T_Integer:
return intVal(node);
case T_Float:
+
/*
- * Values too large for int4 will be represented as Float constants
- * by the lexer. Accept these if they are valid OID strings.
+ * Values too large for int4 will be represented as Float
+ * constants by the lexer. Accept these if they are valid OID
+ * strings.
*/
return oidin_subr(strVal(node), NULL);
default:
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node));
}
- return InvalidOid; /* keep compiler quiet */
+ return InvalidOid; /* keep compiler quiet */
}
*
* Portions Copyright (c) 2002-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.56 2010/04/26 14:17:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.57 2010/07/06 19:18:58 momjian Exp $
*
*-----------------------------------------------------------------------
*/
*
* FYI, The Open Group locale standard is defined here:
*
- * http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap07.html
+ * http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap07.html
*----------
*/
static char *
db_encoding_strdup(int encoding, const char *str)
{
- char *pstr;
- char *mstr;
+ char *pstr;
+ char *mstr;
/* convert the string to the database encoding */
pstr = (char *) pg_do_encoding_conversion(
- (unsigned char *) str, strlen(str),
- encoding, GetDatabaseEncoding());
+ (unsigned char *) str, strlen(str),
+ encoding, GetDatabaseEncoding());
mstr = strdup(pstr);
if (pstr != str)
pfree(pstr);
char *grouping;
char *thousands_sep;
int encoding;
+
#ifdef WIN32
char *save_lc_ctype;
#endif
save_lc_numeric = pstrdup(save_lc_numeric);
#ifdef WIN32
- /*
- * Ideally, monetary and numeric local symbols could be returned in
- * any server encoding. Unfortunately, the WIN32 API does not allow
- * setlocale() to return values in a codepage/CTYPE that uses more
- * than two bytes per character, like UTF-8:
- *
- * http://msdn.microsoft.com/en-us/library/x99tb11d.aspx
- *
- * Evidently, LC_CTYPE allows us to control the encoding used
- * for strings returned by localeconv(). The Open Group
- * standard, mentioned at the top of this C file, doesn't
- * explicitly state this.
- *
- * Therefore, we set LC_CTYPE to match LC_NUMERIC or LC_MONETARY
- * (which cannot be UTF8), call localeconv(), and then convert from
- * the numeric/monitary LC_CTYPE to the server encoding. One
- * example use of this is for the Euro symbol.
- *
- * Perhaps someday we will use GetLocaleInfoW() which returns values
- * in UTF16 and convert from that.
- */
+
+ /*
+ * Ideally, monetary and numeric local symbols could be returned in any
+ * server encoding. Unfortunately, the WIN32 API does not allow
+ * setlocale() to return values in a codepage/CTYPE that uses more than
+ * two bytes per character, like UTF-8:
+ *
+ * http://msdn.microsoft.com/en-us/library/x99tb11d.aspx
+ *
+ * Evidently, LC_CTYPE allows us to control the encoding used for strings
+ * returned by localeconv(). The Open Group standard, mentioned at the
+ * top of this C file, doesn't explicitly state this.
+ *
+ * Therefore, we set LC_CTYPE to match LC_NUMERIC or LC_MONETARY (which
+ * cannot be UTF8), call localeconv(), and then convert from the
+ * numeric/monetary LC_CTYPE to the server encoding. One example use of
+ * this is for the Euro symbol.
+ *
+ * Perhaps someday we will use GetLocaleInfoW() which returns values in
+ * UTF16 and convert from that.
+ */
/* save user's value of ctype locale */
save_lc_ctype = setlocale(LC_CTYPE, NULL);
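The dance described above is easier to follow in condensed form. The sketch below is illustrative only: it shows just the LC_NUMERIC pass, omits all error handling and the LC_MONETARY pass, and assumes that "encoding" already holds the encoding of the numeric locale; the calls it uses (setlocale, localeconv, db_encoding_strdup, pstrdup, pfree) all appear in this file.

/* Condensed sketch of the WIN32 LC_CTYPE dance described above */
save_lc_ctype = pstrdup(setlocale(LC_CTYPE, NULL));	/* remember caller's CTYPE */

setlocale(LC_CTYPE, setlocale(LC_NUMERIC, NULL));	/* let CTYPE follow NUMERIC */
{
	struct lconv *lc = localeconv();	/* symbols arrive in that codepage */

	thousands_sep = db_encoding_strdup(encoding, lc->thousands_sep);
}
setlocale(LC_CTYPE, save_lc_ctype);					/* put the caller's CTYPE back */
pfree(save_lc_ctype);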
len = wcsftime(wbuf, MAX_L10N_DATA, format, tm);
if (len == 0)
+
/*
* strftime call failed - return 0 with the contents of dst
* unspecified
/* redefine strftime() */
#define strftime(a,b,c,d) strftime_win32(a,b,L##c,d)
-
#endif /* WIN32 */
char buf[MAX_L10N_DATA];
char *ptr;
int i;
+
#ifdef WIN32
char *save_lc_ctype;
#endif
save_lc_time = pstrdup(save_lc_time);
#ifdef WIN32
+
/*
* On WIN32, there is no way to get locale-specific time values in a
* specified locale, like we do for monetary/numeric. We can only get
* CP_ACP (see strftime_win32) or UTF16. Therefore, we get UTF16 and
- * convert it to the database locale. However, wcsftime() internally
- * uses LC_CTYPE, so we set it here. See the WIN32 comment near the
- * top of PGLC_localeconv().
+ * convert it to the database locale. However, wcsftime() internally uses
+ * LC_CTYPE, so we set it here. See the WIN32 comment near the top of
+ * PGLC_localeconv().
*/
/* save user's value of ctype locale */
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.97 2010/03/03 17:29:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/xml.c,v 1.98 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* pg_xml_init --- set up for use of libxml
*
* This should be called by each function that is about to use libxml
- * facilities. It has two responsibilities: verify compatibility with the
+ * facilities. It has two responsibilities: verify compatibility with the
* loaded libxml version (done on first call in a session) and establish
* or re-establish our libxml error handler. The latter needs to be done
* anytime we might have passed control to add-on modules (eg libperl) which
print_xml_decl(StringInfo buf, const xmlChar *version,
pg_enc encoding, int standalone)
{
- pg_xml_init(); /* why is this here? */
+ pg_xml_init(); /* why is this here? */
if ((version && strcmp((char *) version, PG_XML_DEFAULT_VERSION) != 0)
|| (encoding && encoding != PG_UTF8)
/*
* It might seem that we should just pass xml_err_buf->data directly to
* errdetail. However, we want to clean out xml_err_buf before throwing
- * error, in case there is another function using libxml further down
- * the call stack.
+ * error, in case there is another function using libxml further down the
+ * call stack.
*/
if (xml_err_buf->len > 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.152 2010/04/20 23:48:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.153 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
switch (cache->id)
{
case INDEXRELID:
+
/*
* Rather than tracking exactly which indexes have to be loaded
* before we can use indexscans (which changes from time to time),
case AMOID:
case AMNAME:
+
/*
* Always do heap scans in pg_am, because it's so small there's
* not much point in an indexscan anyway. We *must* do this when
case AUTHNAME:
case AUTHOID:
case AUTHMEMMEMROLE:
+
/*
* Protect authentication lookups occurring before relcache has
* collected entries for shared indexes.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.310 2010/04/20 23:48:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.311 2010/07/06 19:18:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* NB: when rebuilding, we'd better hold some lock on the relation,
* else the catalog data we need to read could be changing under us.
- * Also, a rel to be rebuilt had better have refcnt > 0. This is because
+ * Also, a rel to be rebuilt had better have refcnt > 0. This is because
* an sinval reset could happen while we're accessing the catalogs, and
* the rel would get blown away underneath us by RelationCacheInvalidate
* if it has zero refcnt.
Oid old_reltype = relation->rd_rel->reltype;
/*
- * As per notes above, a rel to be rebuilt MUST have refcnt > 0; while
- * of course it would be a bad idea to blow away one with nonzero refcnt.
+ * As per notes above, a rel to be rebuilt MUST have refcnt > 0; while of
+ * course it would be a bad idea to blow away one with nonzero refcnt.
*/
Assert(rebuild ?
!RelationHasReferenceCountZero(relation) :
* forget the "new" status of the relation, which is a useful
* optimization to have. Ditto for the new-relfilenode status.
*
- * The rel could have zero refcnt here, so temporarily increment
- * the refcnt to ensure it's safe to rebuild it. We can assume that
- * the current transaction has some lock on the rel already.
+ * The rel could have zero refcnt here, so temporarily increment the
+ * refcnt to ensure it's safe to rebuild it. We can assume that the
+ * current transaction has some lock on the rel already.
*/
RelationIncrementReferenceCount(relation);
RelationClearRelation(relation, true);
/*
* Pre-existing rels can be dropped from the relcache if not open.
*/
- bool rebuild = !RelationHasReferenceCountZero(relation);
+ bool rebuild = !RelationHasReferenceCountZero(relation);
RelationClearRelation(relation, rebuild);
}
RelationMapInitializePhase2();
/*
- * In bootstrap mode, the shared catalogs aren't there yet anyway,
- * so do nothing.
+ * In bootstrap mode, the shared catalogs aren't there yet anyway, so do
+ * nothing.
*/
if (IsBootstrapProcessingMode())
return;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.212 2010/04/26 10:52:00 rhaas Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.213 2010/07/06 19:18:58 momjian Exp $
*
*
*-------------------------------------------------------------------------
elog(FATAL, "could not disable timer for authorization timeout");
/*
- * Log connection for streaming replication even if Log_connections disabled.
+ * Log connection for streaming replication even if Log_connections
+ * disabled.
*/
if (am_walsender)
{
if (port->remote_port[0])
ereport(LOG,
- (errmsg("replication connection authorized: user=%s host=%s port=%s",
- port->user_name,
- port->remote_host,
- port->remote_port)));
+ (errmsg("replication connection authorized: user=%s host=%s port=%s",
+ port->user_name,
+ port->remote_host,
+ port->remote_port)));
else
ereport(LOG,
(errmsg("replication connection authorized: user=%s host=%s",
- port->user_name,
- port->remote_host)));
+ port->user_name,
+ port->remote_host)));
}
else if (Log_connections)
ereport(LOG,
if (IsUnderPostmaster)
{
/*
- * The postmaster already started the XLOG machinery, but we need
- * to call InitXLOGAccess(), if the system isn't in hot-standby mode.
+ * The postmaster already started the XLOG machinery, but we need to
+ * call InitXLOGAccess(), if the system isn't in hot-standby mode.
* This is handled by calling RecoveryInProgress and ignoring the
* result.
*/
else
{
/*
- * We are either a bootstrap process or a standalone backend.
- * Either way, start up the XLOG machinery, and register to have it
- * closed down at exit.
+ * We are either a bootstrap process or a standalone backend. Either
+ * way, start up the XLOG machinery, and register to have it closed
+ * down at exit.
*/
StartupXLOG();
on_shmem_exit(ShutdownXLOG, 0);
}
/*
- * If we're trying to shut down, only superusers can connect, and
- * new replication connections are not allowed.
+ * If we're trying to shut down, only superusers can connect, and new
+ * replication connections are not allowed.
*/
if ((!am_superuser || am_walsender) &&
MyProcPort != NULL &&
if (am_walsender)
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("new replication connections are not allowed during database shutdown")));
+ errmsg("new replication connections are not allowed during database shutdown")));
else
ereport(FATAL,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to connect during database shutdown")));
+ errmsg("must be superuser to connect during database shutdown")));
}
/*
- * The last few connections slots are reserved for superusers.
- * Although replication connections currently require superuser
- * privileges, we don't allow them to consume the reserved slots,
- * which are intended for interactive use.
+ * The last few connections slots are reserved for superusers. Although
+ * replication connections currently require superuser privileges, we
+ * don't allow them to consume the reserved slots, which are intended for
+ * interactive use.
*/
if ((!am_superuser || am_walsender) &&
ReservedBackends > 0 &&
*
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.95 2010/02/27 03:55:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.96 2010/07/06 19:18:58 momjian Exp $
*/
#include "postgres.h"
{
utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
dstlen = MultiByteToWideChar(codepage, 0, str, len, utf16, len);
- utf16[dstlen] = L'\0';
+ utf16[dstlen] = L'\0';
}
else
{
utf16 = (WCHAR *) palloc(sizeof(WCHAR) * (len + 1));
dstlen = MultiByteToWideChar(CP_UTF8, 0, utf8, len, utf16, len);
- utf16[dstlen] = L'\0';
+ utf16[dstlen] = L'\0';
if (utf8 != str)
pfree(utf8);
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.559 2010/07/03 21:23:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.560 2010/07/06 19:18:58 momjian Exp $
*
*--------------------------------------------------------------------
*/
static char *_ShowOption(struct config_generic * record, bool use_units);
static bool is_newvalue_equal(struct config_generic * record, const char *newvalue);
static bool validate_option_array_item(const char *name, const char *value,
- bool skipIfNoPermissions);
+ bool skipIfNoPermissions);
/*
case PGC_S_DATABASE:
case PGC_S_USER:
case PGC_S_DATABASE_USER:
+
/*
- * The existing value came from an ALTER ROLE/DATABASE SET command.
- * We can assume that at the time the command was issued, we
- * checked that the issuing user was superuser if the variable
- * requires superuser privileges to set. So it's safe to
- * use SUSET context here.
+ * The existing value came from an ALTER ROLE/DATABASE SET
+ * command. We can assume that at the time the command was issued,
+ * we checked that the issuing user was superuser if the variable
+ * requires superuser privileges to set. So it's safe to use
+ * SUSET context here.
*/
phcontext = PGC_SUSET;
break;
case PGC_S_CLIENT:
case PGC_S_SESSION:
default:
+
/*
- * We must assume that the value came from an untrusted user,
- * even if the current_user is a superuser.
+ * We must assume that the value came from an untrusted user, even
+ * if the current_user is a superuser.
*/
phcontext = PGC_USERSET;
break;
* Validate a proposed option setting for GUCArrayAdd/Delete/Reset.
*
* name is the option name. value is the proposed value for the Add case,
- * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
+ * or NULL for the Delete/Reset cases. If skipIfNoPermissions is true, it's
* not an error to have no permissions to set the option.
*
* Returns TRUE if OK, FALSE if skipIfNoPermissions is true and user does not
* SUSET and user is superuser).
*
* name is not known, but exists or can be created as a placeholder
- * (implying it has a prefix listed in custom_variable_classes).
- * We allow this case if you're a superuser, otherwise not. Superusers
- * are assumed to know what they're doing. We can't allow it for other
- * users, because when the placeholder is resolved it might turn out to
- * be a SUSET variable; define_custom_variable assumes we checked that.
+ * (implying it has a prefix listed in custom_variable_classes). We allow
+ * this case if you're a superuser, otherwise not. Superusers are assumed
+ * to know what they're doing. We can't allow it for other users, because
+ * when the placeholder is resolved it might turn out to be a SUSET
+ * variable; define_custom_variable assumes we checked that.
*
* name is not known and can't be created as a placeholder. Throw error,
- * unless skipIfNoPermissions is true, in which case return FALSE.
- * (It's tempting to allow this case to superusers, if the name is
- * qualified but not listed in custom_variable_classes. That would
- * ease restoring of dumps containing ALTER ROLE/DATABASE SET. However,
- * it's not clear that this usage justifies such a loss of error checking.
- * You can always fix custom_variable_classes before you restore.)
+ * unless skipIfNoPermissions is true, in which case return FALSE. (It's
+ * tempting to allow this case to superusers, if the name is qualified but
+ * not listed in custom_variable_classes. That would ease restoring of
+ * dumps containing ALTER ROLE/DATABASE SET. However, it's not clear that
+ * this usage justifies such a loss of error checking. You can always fix
+ * custom_variable_classes before you restore.)
*/
gconf = find_option(name, true, WARNING);
if (!gconf)
return false;
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
}
if (gconf->flags & GUC_CUSTOM_PLACEHOLDER)
/* manual permissions check so we can avoid an error being thrown */
if (gconf->context == PGC_USERSET)
- /* ok */ ;
+ /* ok */ ;
else if (gconf->context == PGC_SUSET && superuser())
- /* ok */ ;
+ /* ok */ ;
else if (skipIfNoPermissions)
return false;
/* if a permissions error should be thrown, let set_config_option do it */
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.41 2010/05/27 19:19:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.42 2010/07/06 19:18:59 momjian Exp $
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
* various details abducted from various places
static size_t last_status_len; /* use to minimize length of clobber */
#endif /* PS_USE_CLOBBER_ARGV */
-static size_t ps_buffer_cur_len; /* nominal strlen(ps_buffer) */
+static size_t ps_buffer_cur_len; /* nominal strlen(ps_buffer) */
static size_t ps_buffer_fixed_size; /* size of the constant prefix */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.119 2010/07/05 09:27:17 heikki Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.120 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * If a portal is still pinned, forcibly unpin it. PortalDrop will
- * not let us drop the portal otherwise. Whoever pinned the portal
- * was interrupted by the abort too and won't try to use it anymore.
+ * If a portal is still pinned, forcibly unpin it. PortalDrop will not
+ * let us drop the portal otherwise. Whoever pinned the portal was
+ * interrupted by the abort too and won't try to use it anymore.
*/
if (portal->portalPinned)
portal->portalPinned = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.186 2010/06/28 02:07:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.187 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If we haven't already read the header, do so.
*
- * NB: this code must agree with _discoverArchiveFormat(). Maybe find
- * a way to unify the cases?
+ * NB: this code must agree with _discoverArchiveFormat(). Maybe find a
+ * way to unify the cases?
*/
if (!AH->readHeader)
{
pgoff_t tpos;
/*
- * If pgoff_t is wider than long, we must have "real" fseeko and not
- * an emulation using fseek. Otherwise report no seek capability.
+ * If pgoff_t is wider than long, we must have "real" fseeko and not an
+ * emulation using fseek. Otherwise report no seek capability.
*/
#ifndef HAVE_FSEEKO
if (sizeof(pgoff_t) > sizeof(long))
return false;
/*
- * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
+ * Check that fseeko(SEEK_SET) works, too. NB: we used to try to test
* this with fseeko(fp, 0, SEEK_CUR). But some platforms treat that as a
* successful no-op even on files that are otherwise unseekable.
*/
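Condensed sketch of the probe that comment describes (taken from the shape of the surrounding seek-capability check; here "fp" is assumed to be the archive's FILE pointer, and the HAVE_FSEEKO fallback above is left out):

/*
 * Remember where we are with ftello(), then try to come back to exactly
 * that offset with SEEK_SET.  A genuinely unseekable stream fails one of
 * the two calls, whereas fseeko(fp, 0, SEEK_CUR) can "succeed" as a no-op.
 */
tpos = ftello(fp);
if (tpos < 0)
	return false;
if (fseeko(fp, tpos, SEEK_SET) != 0)
	return false;
return true;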
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.46 2010/06/28 02:07:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.47 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!ctx->hasSeek || tctx->dataState == K_OFFSET_POS_NOT_SET)
{
/*
- * We cannot seek directly to the desired block. Instead, skip
- * over block headers until we find the one we want. This could
- * fail if we are asked to restore items out-of-order.
+ * We cannot seek directly to the desired block. Instead, skip over
+ * block headers until we find the one we want. This could fail if we
+ * are asked to restore items out-of-order.
*/
_readBlockHeader(AH, &blkType, &id);
else if (!ctx->hasSeek)
die_horribly(AH, modulename, "could not find block ID %d in archive -- "
"possibly due to out-of-order restore request, "
- "which cannot be handled due to non-seekable input file\n",
+ "which cannot be handled due to non-seekable input file\n",
te->dumpId);
- else /* huh, the dataPos led us to EOF? */
+ else /* huh, the dataPos led us to EOF? */
die_horribly(AH, modulename, "could not find block ID %d in archive -- "
"possibly corrupt archive\n",
te->dumpId);
/*
* If possible, re-write the TOC in order to update the data offset
- * information. This is not essential, as pg_restore can cope in
- * most cases without it; but it can make pg_restore significantly
- * faster in some situations (especially parallel restore).
+ * information. This is not essential, as pg_restore can cope in most
+ * cases without it; but it can make pg_restore significantly faster
+ * in some situations (especially parallel restore).
*/
if (ctx->hasSeek &&
fseeko(AH->FH, tpos, SEEK_SET) == 0)
* http://archives.postgresql.org/pgsql-bugs/2010-02/msg00187.php
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.580 2010/05/15 21:41:16 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.581 2010/07/06 19:18:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* find one, create a CONSTRAINT entry linked to the INDEX entry. We
* assume an index won't have more than one internal dependency.
*
- * As of 9.0 we don't need to look at pg_depend but can check for
- * a match to pg_constraint.conindid. The check on conrelid is
+ * As of 9.0 we don't need to look at pg_depend but can check for a
+ * match to pg_constraint.conindid. The check on conrelid is
* redundant but useful because that column is indexed while conindid
* is not.
*/
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.220 2010/05/21 17:37:44 rhaas Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.221 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
len = strlen(opt);
while (len > 0 &&
(isspace((unsigned char) opt[len - 1])
- || opt[len - 1] == ';'))
+ || opt[len - 1] == ';'))
opt[--len] = '\0';
}
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.145 2010/05/28 20:02:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.146 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
/*
* Make sure to flush the output stream, so intermediate results are
- * visible to the client immediately. We check the results because
- * if the pager dies/exits/etc, there's no sense throwing more data
- * at it.
+ * visible to the client immediately. We check the results because if
+ * the pager dies/exits/etc, there's no sense throwing more data at
+ * it.
*/
flush_error = fflush(pset.queryFout);
/*
- * Check if we are at the end, if a cancel was pressed, or if
- * there were any errors either trying to flush out the results,
- * or more generally on the output stream at all. If we hit any
- * errors writing things to the stream, we presume $PAGER has
- * disappeared and stop bothering to pull down more data.
+ * Check if we are at the end, if a cancel was pressed, or if there
+ * were any errors either trying to flush out the results, or more
+ * generally on the output stream at all. If we hit any errors
+ * writing things to the stream, we presume $PAGER has disappeared and
+ * stop bothering to pull down more data.
*/
if (ntuples < pset.fetch_count || cancel_pressed || flush_error ||
ferror(pset.queryFout))
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.241 2010/03/11 21:29:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.242 2010/07/06 19:18:59 momjian Exp $
*/
#include "postgres_fe.h"
if (pset.sversion >= 90000)
appendPQExpBuffer(&buf,
" (NOT i.indimmediate) AND "
- "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
+ "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
"condeferrable) AS condeferrable,\n"
" (NOT i.indimmediate) AND "
- "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
+ "EXISTS (SELECT 1 FROM pg_catalog.pg_constraint "
"WHERE conrelid = i.indrelid AND "
"conindid = i.indexrelid AND "
"contype IN ('p','u','x') AND "
appendPQExpBuffer(&buf, "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true),\n ");
if (pset.sversion >= 90000)
appendPQExpBuffer(&buf,
- "pg_catalog.pg_get_constraintdef(con.oid, true), "
+ "pg_catalog.pg_get_constraintdef(con.oid, true), "
"contype, condeferrable, condeferred");
else
appendPQExpBuffer(&buf,
"null AS constraintdef, null AS contype, "
- "false AS condeferrable, false AS condeferred");
+ "false AS condeferrable, false AS condeferred");
if (pset.sversion >= 80000)
appendPQExpBuffer(&buf, ", c2.reltablespace");
appendPQExpBuffer(&buf,
/* Print tablespace of the index on the same line */
if (pset.sversion >= 80000)
add_tablespace_footer(&cont, 'i',
- atooid(PQgetvalue(result, i, 10)),
+ atooid(PQgetvalue(result, i, 10)),
false);
}
}
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.127 2010/05/09 18:17:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.128 2010/07/06 19:19:00 momjian Exp $
*/
#include "postgres_fe.h"
/* spaces first */
fprintf(fout, "%*s", width_wrap[j] - chars_to_output, "");
fputnbytes(fout,
- (char *) (this_line->ptr + bytes_output[j]),
+ (char *) (this_line->ptr + bytes_output[j]),
bytes_to_output);
}
else /* Left aligned cell */
{
/* spaces second */
fputnbytes(fout,
- (char *) (this_line->ptr + bytes_output[j]),
+ (char *) (this_line->ptr + bytes_output[j]),
bytes_to_output);
}
{
if (content->cellmustfree == NULL)
content->cellmustfree = pg_local_calloc(
- content->ncolumns * content->nrows + 1, sizeof(bool));
+ content->ncolumns * content->nrows + 1, sizeof(bool));
content->cellmustfree[content->cellsadded] = true;
}
{
if (content->cellmustfree)
{
- int i;
+ int i;
+
for (i = 0; i < content->nrows * content->ncolumns; i++)
{
if (content->cellmustfree[i])
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.45 2010/03/01 20:55:45 heikki Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.h,v 1.46 2010/07/06 19:19:00 momjian Exp $
*/
#ifndef PRINT_H
#define PRINT_H
extern void printTableAddHeader(printTableContent *const content,
const char *header, const bool translate, const char align);
extern void printTableAddCell(printTableContent *const content,
- const char *cell, const bool translate, const bool mustfree);
+ const char *cell, const bool translate, const bool mustfree);
extern void printTableAddFooter(printTableContent *const content,
const char *footer);
extern void printTableSetFooter(printTableContent *const content,
*
* Copyright (c) 2000-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.199 2010/06/07 02:59:02 itagaki Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.200 2010/07/06 19:19:00 momjian Exp $
*/
/*----------------------------------------------------------------------
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev2_wd, "AGGREGATE") == 0 ||
pg_strcasecmp(prev2_wd, "FUNCTION") == 0))
- COMPLETE_WITH_CONST("(");
+ COMPLETE_WITH_CONST("(");
/* ALTER AGGREGATE,FUNCTION <name> (...) */
else if (pg_strcasecmp(prev4_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev3_wd, "AGGREGATE") == 0 ||
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.134 2010/03/28 09:27:02 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.135 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct xl_btree_delete
{
- RelFileNode node; /* RelFileNode of the index */
+ RelFileNode node; /* RelFileNode of the index */
BlockNumber block;
- RelFileNode hnode; /* RelFileNode of the heap the index currently points at */
+ RelFileNode hnode; /* RelFileNode of the heap the index currently
+ * points at */
int nitems;
/* TARGET OFFSET NUMBERS FOLLOW AT THE END */
extern void _bt_pageinit(Page page, Size size);
extern bool _bt_page_recyclable(Page page);
extern void _bt_delitems_delete(Relation rel, Buffer buf,
- OffsetNumber *itemnos, int nitems, Relation heapRel);
+ OffsetNumber *itemnos, int nitems, Relation heapRel);
extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
- OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed);
+ OffsetNumber *itemnos, int nitems, BlockNumber lastBlockVacuumed);
extern int _bt_pagedel(Relation rel, Buffer buf, BTStack stack);
/*
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.186 2010/03/30 21:58:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.187 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
double tuple_fraction; /* tuple_fraction passed to query_planner */
- bool hasInheritedTarget; /* true if parse->resultRelation is an
- * inheritance child rel */
+ bool hasInheritedTarget; /* true if parse->resultRelation is an
+ * inheritance child rel */
bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */
bool hasHavingQual; /* true if havingQual was non-null */
bool hasPseudoConstantQuals; /* true if any RestrictInfo has
-/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.95 2010/05/28 16:34:15 itagaki Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.96 2010/07/06 19:19:00 momjian Exp $ */
#if defined(_MSC_VER) || defined(__BORLANDC__)
#define WIN32_ONLY_COMPILER
#else
#define PGDLLEXPORT __declspec (dllimport)
#endif
-
#else /* not CYGWIN, not MSVC, not MingW */
#define PGDLLIMPORT
#define PGDLLEXPORT
*
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/replication/walprotocol.h,v 1.1 2010/06/03 22:17:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/replication/walprotocol.h,v 1.2 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
XLogRecPtr walEnd;
/* Sender's system clock at the time of transmission */
- TimestampTz sendTime;
+ TimestampTz sendTime;
} WalDataMessageHeader;
/*
- * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
+ * Maximum data payload in a WAL data message. Must be >= XLOG_BLCKSZ.
*
* We don't have a good idea of what a good value would be; there's some
* overhead per message in both walsender and walreceiver, but on the other
*
* Portions Copyright (c) 2010-2010, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.10 2010/07/03 20:43:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/replication/walreceiver.h,v 1.11 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* PID of currently active walreceiver process, its current state and
- * start time (actually, the time at which it was requested to be started).
+ * start time (actually, the time at which it was requested to be
+ * started).
*/
pid_t pid;
WalRcvState walRcvState;
/*
* receivedUpto-1 is the last byte position that has already been
* received. When startup process starts the walreceiver, it sets
- * receivedUpto to the point where it wants the streaming to begin.
- * After that, walreceiver updates this whenever it flushes the received
- * WAL to disk.
+ * receivedUpto to the point where it wants the streaming to begin. After
+ * that, walreceiver updates this whenever it flushes the received WAL to
+ * disk.
*/
XLogRecPtr receivedUpto;
/*
* latestChunkStart is the starting byte position of the current "batch"
* of received WAL. It's actually the same as the previous value of
- * receivedUpto before the last flush to disk. Startup process can use
+ * receivedUpto before the last flush to disk. Startup process can use
* this to detect whether it's keeping up or not.
*/
XLogRecPtr latestChunkStart;
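Illustrative only, not taken from the startup-process code: one way these two fields can answer the "keeping up" question from the comment. The shared-struct name (WalRcvData) and the XLByteLE comparison macro are assumed from the surrounding tree, and locking of the shared memory is omitted.

/*
 * Sketch: replay is "keeping up" if the position about to be replayed has
 * reached the start of the most recently received batch of WAL.
 */
static bool
replay_is_keeping_up(XLogRecPtr replayPtr, volatile WalRcvData *walrcv)
{
	return XLByteLE(walrcv->latestChunkStart, replayPtr);
}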
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.31 2010/05/15 20:01:32 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pmsignal.h,v 1.32 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef enum
{
PMSIGNAL_RECOVERY_STARTED, /* recovery has started */
- PMSIGNAL_BEGIN_HOT_STANDBY, /* begin Hot Standby */
+ PMSIGNAL_BEGIN_HOT_STANDBY, /* begin Hot Standby */
PMSIGNAL_WAKEN_ARCHIVER, /* send a NOTIFY signal to xlog archiver */
PMSIGNAL_ROTATE_LOGFILE, /* send SIGUSR1 to syslogger to rotate logfile */
PMSIGNAL_START_AUTOVAC_LAUNCHER, /* start an autovacuum launcher */
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.122 2010/05/26 19:52:52 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.123 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void handle_sig_alarm(SIGNAL_ARGS);
extern bool enable_standby_sig_alarm(TimestampTz now,
- TimestampTz fin_time, bool deadlock_only);
+ TimestampTz fin_time, bool deadlock_only);
extern bool disable_standby_sig_alarm(void);
extern void handle_standby_sig_alarm(SIGNAL_ARGS);
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.32 2010/05/13 11:15:38 sriggs Exp $
+ * $PostgreSQL: pgsql/src/include/storage/procarray.h,v 1.33 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void RecordKnownAssignedTransactionIds(TransactionId xid);
extern void ExpireTreeKnownAssignedTransactionIds(TransactionId xid,
- int nsubxids, TransactionId *subxids,
- TransactionId max_xid);
+ int nsubxids, TransactionId *subxids,
+ TransactionId max_xid);
extern void ExpireAllKnownAssignedTransactionIds(void);
extern void ExpireOldKnownAssignedTransactionIds(TransactionId xid);
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.349 2010/06/13 17:43:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.350 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Datum oidvectorge(PG_FUNCTION_ARGS);
extern Datum oidvectorgt(PG_FUNCTION_ARGS);
extern oidvector *buildoidvector(const Oid *oids, int n);
-extern Oid oidparse(Node *node);
+extern Oid oidparse(Node *node);
/* pseudotypes.c */
extern Datum cstring_in(PG_FUNCTION_ARGS);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.55 2010/05/07 19:35:03 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.56 2010/07/06 19:19:00 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
host ? "host=" : "", host ? host : "",
port ? "port=" : "", port ? port : "",
(user && strlen(user) > 0) ? "user=" : "", user ? user : "",
- (passwd && strlen(passwd) > 0) ? "password=" : "", passwd ? passwd : "",
+ (passwd && strlen(passwd) > 0) ? "password=" : "", passwd ? passwd : "",
options ? options : "");
/*
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.26 2010/05/08 16:39:52 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.27 2010/07/06 19:19:00 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
if (strcmp(sqlstate, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR) == 0)
{
- /* we might get here if the connection breaks down, so let's
- * check for this instead of giving just the generic internal error */
+ /*
+ * we might get here if the connection breaks down, so let's check for
+ * this instead of giving just the generic internal error
+ */
if (PQstatus(conn) == CONNECTION_BAD)
{
sqlstate = "57P02";
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.97 2010/05/25 17:28:20 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.98 2010/07/06 19:19:00 momjian Exp $ */
/*
* The aim is to get a simpler interface to the database routines.
strcpy(mallocedval, "array [");
for (element = 0; element < asize; element++)
- sprintf(mallocedval + strlen(mallocedval), "%llu,", ((unsigned long long int*) var->value)[element]);
+ sprintf(mallocedval + strlen(mallocedval), "%llu,", ((unsigned long long int *) var->value)[element]);
strcpy(mallocedval + strlen(mallocedval) - 1, "]");
}
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.58 2010/05/25 17:28:20 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.59 2010/07/06 19:19:00 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#define LONG_LONG_MIN LLONG_MIN
#else
#define LONG_LONG_MIN LONGLONG_MIN
-#endif /* LLONG_MIN */
-#endif /* LONG_LONG_MIN */
-#endif /* HAVE_LONG_LONG_INT */
+#endif /* LLONG_MIN */
+#endif /* LONG_LONG_MIN */
+#endif /* HAVE_LONG_LONG_INT */
bool ecpg_internal_regression_mode = false;
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.115 2010/04/03 19:30:49 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.116 2010/07/06 19:19:00 momjian Exp $ */
/* Main for ecpg, the PostgreSQL embedded SQL precompiler. */
/* Copyright (c) 1996-2010, PostgreSQL Global Development Group */
fclose(yyin);
if (out_option == 0 && yyout != stdout)
fclose(yyout);
+
/*
* If there was an error, delete the output file.
*/
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.c,v 1.92 2010/04/03 07:53:29 petere Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/type.c,v 1.93 2010/07/06 19:19:00 momjian Exp $ */
#include "postgres_fe.h"
void
ECPGdump_a_type(FILE *o, const char *name, struct ECPGtype * type, const int brace_level,
- const char *ind_name, struct ECPGtype * ind_type, const int ind_brace_level,
+ const char *ind_name, struct ECPGtype * ind_type, const int ind_brace_level,
const char *prefix, const char *ind_prefix,
char *arr_str_siz, const char *struct_sizeof,
const char *ind_struct_sizeof)
#line 1 "struct.h"
-
-
-
- /* dec_t */
-
-
+
+
+
+
+ /* dec_t */
+
+
typedef struct mytype MYTYPE ;
-#line 9 "struct.h"
+#line 10 "struct.h"
-
-
-
-
-
-
+
+
+
+
+
+
+
typedef struct mynulltype MYNULLTYPE ;
-#line 18 "struct.h"
+#line 20 "struct.h"
#line 11 "outofscope.pgc"
struct mytype {
-#line 3 "struct.h"
+#line 4 "struct.h"
int id ;
-#line 4 "struct.h"
+#line 5 "struct.h"
char t [ 64 ] ;
-#line 5 "struct.h"
+#line 6 "struct.h"
double d1 ;
-#line 6 "struct.h"
+#line 7 "struct.h"
double d2 ;
-#line 7 "struct.h"
+#line 8 "struct.h"
char c [ 30 ] ;
} ; struct mynulltype {
-#line 12 "struct.h"
+#line 14 "struct.h"
int id ;
-#line 13 "struct.h"
+#line 15 "struct.h"
int t ;
-#line 14 "struct.h"
+#line 16 "struct.h"
int d1 ;
-#line 15 "struct.h"
+#line 17 "struct.h"
int d2 ;
-#line 16 "struct.h"
+#line 18 "struct.h"
int c ;
} ;/* exec sql end declare section */
#line 12 "outofscope.pgc"
#line 3 "strings.pgc"
/* exec sql begin declare section */
#line 1 "strings.h"
-
+
+
+
+
+
+
#line 5 "strings.pgc"
-char *s1, *s2, *s3, *s4, *s5, *s6;
+char *s1,
+ *s2,
+ *s3,
+ *s4,
+ *s5,
+ *s6;
-struct mytype {
- int id;
- char t[64];
- double d1; /* dec_t */
- double d2;
- char c[30];
+struct mytype
+{
+ int id;
+ char t[64];
+ double d1; /* dec_t */
+ double d2;
+ char c[30];
};
typedef struct mytype MYTYPE;
-struct mynulltype {
- int id;
- int t;
- int d1;
- int d2;
- int c;
+struct mynulltype
+{
+ int id;
+ int t;
+ int d1;
+ int d2;
+ int c;
};
typedef struct mynulltype MYNULLTYPE;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.394 2010/06/23 21:54:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.395 2010/07/06 19:19:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
"Fallback-Application-Name", "", 64},
{"keepalives", NULL, NULL, NULL,
- "TCP-Keepalives", "", 1}, /* should be just '0' or '1' */
+ "TCP-Keepalives", "", 1}, /* should be just '0' or '1' */
{"keepalives_idle", NULL, NULL, NULL,
- "TCP-Keepalives-Idle", "", 10}, /* strlen(INT32_MAX) == 10 */
+ "TCP-Keepalives-Idle", "", 10}, /* strlen(INT32_MAX) == 10 */
{"keepalives_interval", NULL, NULL, NULL,
"TCP-Keepalives-Interval", "", 10}, /* strlen(INT32_MAX) == 10 */
{"keepalives_count", NULL, NULL, NULL,
- "TCP-Keepalives-Count", "", 10}, /* strlen(INT32_MAX) == 10 */
+ "TCP-Keepalives-Count", "", 10}, /* strlen(INT32_MAX) == 10 */
#ifdef USE_SSL
static int
setKeepalivesIdle(PGconn *conn)
{
- int idle;
+ int idle;
if (conn->keepalives_idle == NULL)
return 1;
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPIDLE,
(char *) &idle, sizeof(idle)) < 0)
{
- char sebuf[256];
+ char sebuf[256];
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("setsockopt(TCP_KEEPIDLE) failed: %s\n"),
+ libpq_gettext("setsockopt(TCP_KEEPIDLE) failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return 0;
}
static int
setKeepalivesInterval(PGconn *conn)
{
- int interval;
+ int interval;
if (conn->keepalives_interval == NULL)
return 1;
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPINTVL,
(char *) &interval, sizeof(interval)) < 0)
{
- char sebuf[256];
+ char sebuf[256];
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("setsockopt(TCP_KEEPINTVL) failed: %s\n"),
+ libpq_gettext("setsockopt(TCP_KEEPINTVL) failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return 0;
}
static int
setKeepalivesCount(PGconn *conn)
{
- int count;
+ int count;
if (conn->keepalives_count == NULL)
return 1;
if (setsockopt(conn->sock, IPPROTO_TCP, TCP_KEEPCNT,
(char *) &count, sizeof(count)) < 0)
{
- char sebuf[256];
+ char sebuf[256];
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("setsockopt(TCP_KEEPCNT) failed: %s\n"),
+ libpq_gettext("setsockopt(TCP_KEEPCNT) failed: %s\n"),
SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
return 0;
}
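For context, the three helpers above are driven purely by connection options. A minimal client-side usage sketch (values arbitrary; assumes libpq-fe.h and stdio.h are included), using the option names from the table earlier in this file:

/* Request TCP keepalive tuning through the connection string */
PGconn	   *conn = PQconnectdb("host=db.example.com dbname=postgres "
							   "keepalives=1 keepalives_idle=60 "
							   "keepalives_interval=10 keepalives_count=5");

if (PQstatus(conn) != CONNECTION_OK)
	fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
PQfinish(conn);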
if (!IS_AF_UNIX(addr_cur->ai_family))
{
- int on = 1;
- int usekeepalives = useKeepalives(conn);
- int err = 0;
+ int on = 1;
+ int usekeepalives = useKeepalives(conn);
+ int err = 0;
if (usekeepalives < 0)
{
{
appendPQExpBuffer(&conn->errorMessage,
libpq_gettext("setsockopt(SO_KEEPALIVE) failed: %s\n"),
- SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
+ SOCK_STRERROR(SOCK_ERRNO, sebuf, sizeof(sebuf)));
err = 1;
}
else if (!setKeepalivesIdle(conn)
error_return:
dot_pg_pass_warning(conn);
-
+
/*
* We used to close the socket at this point, but that makes it awkward
* for those above us if they wish to remove this socket from their own
}
-static bool getPgPassFilename(char *pgpassfile)
+static bool
+getPgPassFilename(char *pgpassfile)
{
char *passfile_env;
{
/* If it was 'invalid authorization', add .pgpass mention */
if (conn->dot_pgpass_used && conn->password_needed && conn->result &&
- /* only works with >= 9.0 servers */
+ /* only works with >= 9.0 servers */
strcmp(PQresultErrorField(conn->result, PG_DIAG_SQLSTATE),
- ERRCODE_INVALID_PASSWORD) == 0)
+ ERRCODE_INVALID_PASSWORD) == 0)
{
char pgpassfile[MAXPGPATH];
if (!getPgPassFilename(pgpassfile))
return;
appendPQExpBuffer(&conn->errorMessage,
- libpq_gettext("password retrieved from file \"%s\"\n"),
+ libpq_gettext("password retrieved from file \"%s\"\n"),
pgpassfile);
}
}
-
+
/*
* Obtain user's home directory, return in given buffer
*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.143 2010/05/09 02:16:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.144 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
FD_ZERO(&output_mask);
FD_ZERO(&except_mask);
if (forRead)
- FD_SET (sock, &input_mask);
+ FD_SET(sock, &input_mask);
if (forWrite)
- FD_SET (sock, &output_mask);
- FD_SET (sock, &except_mask);
+ FD_SET(sock, &output_mask);
+ FD_SET(sock, &except_mask);
/* Compute appropriate timeout interval */
if (end_time == ((time_t) -1))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.134 2010/05/26 21:39:27 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.135 2010/07/06 19:19:01 momjian Exp $
*
* NOTES
*
* Initialize (potentially) per-connection SSL data, namely the
* client certificate, private key, and trusted CA certs.
*
- * conn->ssl must already be created. It receives the connection's client
+ * conn->ssl must already be created. It receives the connection's client
* certificate and private key. Note however that certificates also get
* loaded into the SSL_context object, and are therefore accessible to all
* connections in this process. This should be OK as long as there aren't
{
/*
* If file is not present, just go on without a client cert; server
- * might or might not accept the connection. Any other error, however,
- * is grounds for complaint.
+ * might or might not accept the connection. Any other error,
+ * however, is grounds for complaint.
*/
if (errno != ENOENT)
{
{
/*
* Cert file exists, so load it. Since OpenSSL doesn't provide the
- * equivalent of "SSL_use_certificate_chain_file", we actually have
- * to load the file twice. The first call loads any extra certs
- * after the first one into chain-cert storage associated with the
- * SSL_context. The second call loads the first cert (only) into
- * the SSL object, where it will be correctly paired with the private
- * key we load below. We do it this way so that each connection
- * understands which subject cert to present, in case different sslcert
- * settings are used for different connections in the same process.
+ * equivalent of "SSL_use_certificate_chain_file", we actually have to
+ * load the file twice. The first call loads any extra certs after
+ * the first one into chain-cert storage associated with the
+ * SSL_context. The second call loads the first cert (only) into the
+ * SSL object, where it will be correctly paired with the private key
+ * we load below. We do it this way so that each connection
+ * understands which subject cert to present, in case different
+ * sslcert settings are used for different connections in the same
+ * process.
*/
if (SSL_CTX_use_certificate_chain_file(SSL_context, fnbuf) != 1)
{
* file */
}
else
-#endif /* USE_SSL_ENGINE */
+#endif /* USE_SSL_ENGINE */
{
/* PGSSLKEY is not an engine, treat it as a filename */
strncpy(fnbuf, conn->sslkey, sizeof(fnbuf));
{
/*
* stat() failed; assume root file doesn't exist. If sslmode is
- * verify-ca or verify-full, this is an error. Otherwise, continue
+ * verify-ca or verify-full, this is an error. Otherwise, continue
* without performing any server cert verification.
*/
if (conn->sslmode[0] == 'v') /* "verify-ca" or "verify-full" */
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.151 2010/06/23 21:54:13 rhaas Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.152 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *pgpass;
char *keepalives; /* use TCP keepalives? */
char *keepalives_idle; /* time between TCP keepalives */
- char *keepalives_interval; /* time between TCP keepalive retransmits */
- char *keepalives_count; /* maximum number of TCP keepalive retransmits */
+ char *keepalives_interval; /* time between TCP keepalive
+ * retransmits */
+ char *keepalives_count; /* maximum number of TCP keepalive
+ * retransmits */
char *sslmode; /* SSL mode (require,prefer,allow,disable) */
char *sslkey; /* client key filename */
char *sslcert; /* client certificate filename */
/**********************************************************************
* plperl.c - perl as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.178 2010/06/29 04:12:47 petere Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.179 2010/07/06 19:19:01 momjian Exp $
*
**********************************************************************/
static char *strip_trailing_ws(const char *msg);
static OP *pp_require_safe(pTHX);
static int restore_context(bool);
+
#ifdef WIN32
static char *setlocale_perl(int category, char *locale);
#endif
*
* If initialization fails due to, e.g., plperl_init_interp() throwing an
* exception, then we'll return here on the next usage and the user will
- * get a rather cryptic: ERROR: attempt to redefine parameter "plperl.use_strict"
+ * get a rather cryptic: ERROR: attempt to redefine parameter
+ * "plperl.use_strict"
*/
static bool inited = false;
HASHCTL hash_ctl;
STMT_START { \
if (saved != NULL) { setlocale_perl(name, saved); pfree(saved); } \
} STMT_END
-
#endif
if (plperl_on_init)
/*
* Record the original function for the 'require' and 'dofile' opcodes.
- * (They share the same implementation.) Ensure it's used for new interpreters.
+ * (They share the same implementation.) Ensure it's used for new
+ * interpreters.
*/
if (!pp_require_orig)
pp_require_orig = PL_ppaddr[OP_REQUIRE];
- else
+ else
{
PL_ppaddr[OP_REQUIRE] = pp_require_orig;
- PL_ppaddr[OP_DOFILE] = pp_require_orig;
+ PL_ppaddr[OP_DOFILE] = pp_require_orig;
}
#ifdef PLPERL_ENABLE_OPMASK_EARLY
+
/*
* For regression testing to prove that the PLC_PERLBOOT and PLC_TRUSTED
* code doesn't even compile any unsafe ops. In future there may be a
errcontext("while running Perl initialization")));
#ifdef PLPERL_RESTORE_LOCALE
- PLPERL_RESTORE_LOCALE(LC_COLLATE, save_collate);
- PLPERL_RESTORE_LOCALE(LC_CTYPE, save_ctype);
+ PLPERL_RESTORE_LOCALE(LC_COLLATE, save_collate);
+ PLPERL_RESTORE_LOCALE(LC_CTYPE, save_ctype);
PLPERL_RESTORE_LOCALE(LC_MONETARY, save_monetary);
- PLPERL_RESTORE_LOCALE(LC_NUMERIC, save_numeric);
- PLPERL_RESTORE_LOCALE(LC_TIME, save_time);
+ PLPERL_RESTORE_LOCALE(LC_NUMERIC, save_numeric);
+ PLPERL_RESTORE_LOCALE(LC_TIME, save_time);
#endif
return plperl;
static void
plperl_trusted_init(void)
{
- HV *stash;
- SV *sv;
- char *key;
- I32 klen;
-
+ HV *stash;
+ SV *sv;
+ char *key;
+ I32 klen;
+
/* use original require while we set up */
PL_ppaddr[OP_REQUIRE] = pp_require_orig;
PL_ppaddr[OP_DOFILE] = pp_require_orig;
-
+
eval_pv(PLC_TRUSTED, FALSE);
if (SvTRUE(ERRSV))
ereport(ERROR,
(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
errcontext("while executing PLC_TRUSTED")));
-
+
if (GetDatabaseEncoding() == PG_UTF8)
{
/*
- * Force loading of utf8 module now to prevent errors that can
- * arise from the regex code later trying to load utf8 modules.
- * See http://rt.perl.org/rt3/Ticket/Display.html?id=47576
+ * Force loading of utf8 module now to prevent errors that can arise
+ * from the regex code later trying to load utf8 modules. See
+ * http://rt.perl.org/rt3/Ticket/Display.html?id=47576
*/
eval_pv("my $a=chr(0x100); return $a =~ /\\xa9/i", FALSE);
if (SvTRUE(ERRSV))
(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
errcontext("while executing utf8fix")));
}
-
+
/*
* Lock down the interpreter
*/
-
+
/* switch to the safe require/dofile opcode for future code */
PL_ppaddr[OP_REQUIRE] = pp_require_safe;
- PL_ppaddr[OP_DOFILE] = pp_require_safe;
-
- /*
- * prevent (any more) unsafe opcodes being compiled
- * PL_op_mask is per interpreter, so this only needs to be set once
+ PL_ppaddr[OP_DOFILE] = pp_require_safe;
+
+ /*
+ * prevent (any more) unsafe opcodes being compiled.  PL_op_mask is per
+ * interpreter, so this only needs to be set once.
*/
PL_op_mask = plperl_opmask;
-
+
/* delete the DynaLoader:: namespace so extensions can't be loaded */
stash = gv_stashpv("DynaLoader", GV_ADDWARN);
hv_iterinit(stash);
- while ((sv = hv_iternextsv(stash, &key, &klen)))
+ while ((sv = hv_iternextsv(stash, &key, &klen)))
{
if (!isGV_with_GP(sv) || !GvCV(sv))
continue;
SvREFCNT_dec(GvCV(sv)); /* free the CV */
- GvCV(sv) = NULL; /* prevent call via GV */
+ GvCV(sv) = NULL; /* prevent call via GV */
}
hv_clear(stash);
-
+
/* invalidate assorted caches */
++PL_sub_generation;
hv_clear(PL_stashcache);
-
+
/*
* Execute plperl.on_plperl_init in the locked-down interpreter
*/
ereport(ERROR,
(errmsg("%s", strip_trailing_ws(SvPV_nolen(ERRSV))),
errcontext("while executing plperl.on_plperl_init")));
-
+
}
}
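
The repeated eval_pv(..., FALSE) / SvTRUE(ERRSV) sequence in plperl_trusted_init is the standard perlembed idiom: with croak_on_error set to FALSE, any Perl-level error is left in ERRSV ($@) for the caller to inspect. A minimal embedding sketch of just that idiom (assumes the Perl development headers and the usual perlembed boilerplate; this is not plperl's actual interpreter setup):

    #include <stdio.h>
    #include <EXTERN.h>
    #include <perl.h>

    static PerlInterpreter *my_perl;

    int
    main(int argc, char **argv, char **env)
    {
        char       *embedding[] = {"", "-e", "0"};

        PERL_SYS_INIT3(&argc, &argv, &env);
        my_perl = perl_alloc();
        perl_construct(my_perl);
        perl_parse(my_perl, NULL, 3, embedding, NULL);

        /* croak_on_error = FALSE: failures land in ERRSV instead of dying */
        eval_pv("die 'not allowed here'", FALSE);
        if (SvTRUE(ERRSV))
            fprintf(stderr, "perl error: %s", SvPV_nolen(ERRSV));

        perl_destruct(my_perl);
        perl_free(my_perl);
        PERL_SYS_TERM();
        return 0;
    }
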
if (!subref)
ereport(ERROR,
- (errmsg("didn't get a CODE reference from compiling function \"%s\"",
- prodesc->proname)));
-
+ (errmsg("didn't get a CODE reference from compiling function \"%s\"",
+ prodesc->proname)));
+
prodesc->reference = subref;
-
+
return;
}
static char *
setlocale_perl(int category, char *locale)
{
- char *RETVAL = setlocale(category, locale);
- if (RETVAL) {
+ char *RETVAL = setlocale(category, locale);
+
+ if (RETVAL)
+ {
#ifdef USE_LOCALE_CTYPE
- if (category == LC_CTYPE
+ if (category == LC_CTYPE
#ifdef LC_ALL
- || category == LC_ALL
+ || category == LC_ALL
#endif
- )
- {
- char *newctype;
+ )
+ {
+ char *newctype;
+
#ifdef LC_ALL
- if (category == LC_ALL)
- newctype = setlocale(LC_CTYPE, NULL);
- else
+ if (category == LC_ALL)
+ newctype = setlocale(LC_CTYPE, NULL);
+ else
#endif
- newctype = RETVAL;
- new_ctype(newctype);
- }
-#endif /* USE_LOCALE_CTYPE */
+ newctype = RETVAL;
+ new_ctype(newctype);
+ }
+#endif /* USE_LOCALE_CTYPE */
#ifdef USE_LOCALE_COLLATE
- if (category == LC_COLLATE
+ if (category == LC_COLLATE
#ifdef LC_ALL
- || category == LC_ALL
+ || category == LC_ALL
#endif
- )
- {
- char *newcoll;
+ )
+ {
+ char *newcoll;
+
#ifdef LC_ALL
- if (category == LC_ALL)
- newcoll = setlocale(LC_COLLATE, NULL);
- else
+ if (category == LC_ALL)
+ newcoll = setlocale(LC_COLLATE, NULL);
+ else
#endif
- newcoll = RETVAL;
- new_collate(newcoll);
- }
-#endif /* USE_LOCALE_COLLATE */
+ newcoll = RETVAL;
+ new_collate(newcoll);
+ }
+#endif /* USE_LOCALE_COLLATE */
#ifdef USE_LOCALE_NUMERIC
- if (category == LC_NUMERIC
+ if (category == LC_NUMERIC
#ifdef LC_ALL
- || category == LC_ALL
+ || category == LC_ALL
#endif
- )
- {
- char *newnum;
+ )
+ {
+ char *newnum;
+
#ifdef LC_ALL
- if (category == LC_ALL)
- newnum = setlocale(LC_NUMERIC, NULL);
- else
+ if (category == LC_ALL)
+ newnum = setlocale(LC_NUMERIC, NULL);
+ else
#endif
- newnum = RETVAL;
- new_numeric(newnum);
- }
-#endif /* USE_LOCALE_NUMERIC */
- }
+ newnum = RETVAL;
+ new_numeric(newnum);
+ }
+#endif /* USE_LOCALE_NUMERIC */
+ }
- return RETVAL;
+ return RETVAL;
}
+
#endif
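
setlocale_perl() above leans on a standard C detail: setlocale() with a NULL locale argument only queries the current setting, which is how the LC_ALL branches read back the effective per-category locale before handing it to new_ctype()/new_collate()/new_numeric(). A minimal sketch of just that query-versus-set behaviour:

    #include <locale.h>
    #include <stdio.h>

    int
    main(void)
    {
        /* NULL locale means "query"; a string means "set and return it" */
        printf("current LC_ALL: %s\n", setlocale(LC_ALL, NULL));

        if (setlocale(LC_ALL, "") == NULL)  /* adopt the environment's locale */
            fprintf(stderr, "setlocale failed\n");

        /* after setting LC_ALL, each category can be read back individually */
        printf("effective LC_CTYPE:   %s\n", setlocale(LC_CTYPE, NULL));
        printf("effective LC_NUMERIC: %s\n", setlocale(LC_NUMERIC, NULL));
        return 0;
    }
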
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.260 2010/07/05 09:27:18 heikki Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.261 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
PLpgSQL_var *curvar;
char *curname = NULL;
- const char *portalname;
+ const char *portalname;
PLpgSQL_expr *query;
ParamListInfo paramLI;
Portal portal;
if (*ptr == 'S' || *ptr == 's')
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("EXECUTE of SELECT ... INTO is not implemented"),
- errhint("You might want to use EXECUTE ... INTO instead.")));
+ errmsg("EXECUTE of SELECT ... INTO is not implemented"),
+ errhint("You might want to use EXECUTE ... INTO instead.")));
break;
}
elog(ERROR, "unsupported target");
/*
- * Make sure the portal doesn't get closed by the user statements
- * we execute.
+ * Make sure the portal doesn't get closed by the user statements we
+ * execute.
*/
PinPortal(portal);
/**********************************************************************
* plpython.c - python as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.145 2010/06/29 00:18:11 petere Exp $
+ * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.146 2010/07/06 19:19:01 momjian Exp $
*
*********************************************************************
*/
PG_TRY();
{
plan->values[j] =
- plan->args[j].out.d.func(NULL, &(plan->args[j].out.d), elem);
+ plan->args[j].out.d.func(NULL, &(plan->args[j].out.d), elem);
}
PG_CATCH();
{
* pltcl.c - PostgreSQL support for Tcl as
* procedural language (PL)
*
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.133 2010/05/13 18:29:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.134 2010/07/06 19:19:01 momjian Exp $
*
**********************************************************************/
buflen = strlen(pmrelname) + 100;
buf = (char *) palloc(buflen);
snprintf(buf, buflen,
- "select modsrc from %s where modname = 'unknown' order by modseq",
+ "select modsrc from %s where modname = 'unknown' order by modseq",
pmrelname);
spi_rc = SPI_execute(buf, false, 0);
-/* $PostgreSQL: pgsql/src/port/crypt.c,v 1.16 2009/06/11 14:49:15 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/port/crypt.c,v 1.17 2010/07/06 19:19:01 momjian Exp $ */
/* $NetBSD: crypt.c,v 1.18 2001/03/01 14:37:35 wiz Exp $ */
/*
#if defined(B64)
B64 b64;
#endif
-} C_block;
+} C_block;
/*
* Convert twenty-four-bit long in host-order
* Win32 (NT4 and newer).
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.62 2010/04/02 15:21:20 mha Exp $
+ * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.63 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
WORD PrintNameOffset;
WORD PrintNameLength;
WCHAR PathBuffer[1];
-} REPARSE_JUNCTION_DATA_BUFFER;
+} REPARSE_JUNCTION_DATA_BUFFER;
#define REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE \
FIELD_OFFSET(REPARSE_JUNCTION_DATA_BUFFER, SubstituteNameOffset)
* must be replaced with recv/send.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/pipe.c,v 1.16 2010/01/02 16:58:13 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/pipe.c,v 1.17 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
serv_addr.sin_family = AF_INET;
serv_addr.sin_port = htons(0);
serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- if (bind(s, (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
+ if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to bind: %ui", WSAGetLastError())));
closesocket(s);
closesocket(s);
return -1;
}
- if (getsockname(s, (SOCKADDR *) &serv_addr, &len) == SOCKET_ERROR)
+ if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to getsockname: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
- if (connect(handles[1], (SOCKADDR *) &serv_addr, len) == SOCKET_ERROR)
+ if (connect(handles[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
{
ereport(LOG, (errmsg_internal("pgpipe failed to connect socket: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
- if ((handles[0] = accept(s, (SOCKADDR *) &serv_addr, &len)) == INVALID_SOCKET)
+ if ((handles[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET)
{
ereport(LOG, (errmsg_internal("pgpipe failed to accept socket: %ui", WSAGetLastError())));
closesocket(handles[1]);
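
pgpipe() above fakes a pipe out of TCP sockets because Win32 anonymous pipes cannot be waited on with select(): bind a listener to the loopback address with port 0, getsockname() to learn the port the kernel picked, connect() one end, and accept() the other. A POSIX sketch of the same sequence (the real code uses the Winsock spellings: SOCKADDR, closesocket(), WSAGetLastError()):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    /* hypothetical stand-in for pgpipe(); returns 0 on success, -1 on failure */
    static int
    loopback_pipe(int handles[2])
    {
        struct sockaddr_in addr;
        socklen_t   len = sizeof(addr);
        int         s = socket(AF_INET, SOCK_STREAM, 0);

        if (s < 0)
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(0);   /* let the kernel choose a free port */
        addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        if (bind(s, (struct sockaddr *) &addr, len) < 0 ||
            listen(s, 1) < 0 ||
            getsockname(s, (struct sockaddr *) &addr, &len) < 0 ||
            (handles[1] = socket(AF_INET, SOCK_STREAM, 0)) < 0)
        {
            close(s);
            return -1;
        }
        if (connect(handles[1], (struct sockaddr *) &addr, len) < 0 ||
            (handles[0] = accept(s, (struct sockaddr *) &addr, &len)) < 0)
        {
            close(handles[1]);
            close(s);
            return -1;
        }
        close(s);                   /* the listening socket is no longer needed */
        return 0;
    }

    int
    main(void)
    {
        int         fds[2];
        char        buf[6] = {0};

        if (loopback_pipe(fds) != 0)
            return 1;
        write(fds[1], "hello", 5);
        read(fds[0], buf, 5);
        printf("%s\n", buf);
        return 0;
    }
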
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/port/snprintf.c,v 1.35 2008/03/18 01:49:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/snprintf.c,v 1.36 2010/07/06 19:19:01 momjian Exp $
*/
#include "c.h"
/* bufend == NULL is for sprintf, where we assume buf is big enough */
FILE *stream; /* eventual output destination, or NULL */
int nchars; /* # chars already sent to stream */
-} PrintfTarget;
+} PrintfTarget;
/*
* Info about the type and value of a formatting parameter. Note that we
ATYPE_LONGLONG,
ATYPE_DOUBLE,
ATYPE_CHARPTR
-} PrintfArgType;
+} PrintfArgType;
typedef union
{
int64 ll;
double d;
char *cptr;
-} PrintfArgValue;
+} PrintfArgValue;
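
The PrintfTarget/PrintfArgValue declarations touched above carry the central trick of this snprintf.c: one formatting engine writes characters into a sink that is either a caller-supplied buffer (bufend == NULL meaning "sprintf, assume it is big enough") or a stdio stream flushed in chunks. A stripped-down sketch of that sink idea with hypothetical names, not the real dopr() machinery:

    #include <stdio.h>

    typedef struct
    {
        char       *bufstart;       /* caller buffer, or scratch space */
        char       *bufptr;         /* next byte to write */
        char       *bufend;         /* end of buffer, or NULL for "big enough" */
        FILE       *stream;         /* eventual destination, or NULL */
    } Sink;

    static void
    sink_flush(Sink *t)
    {
        size_t      nc = t->bufptr - t->bufstart;

        if (t->stream && nc > 0)
            fwrite(t->bufstart, 1, nc, t->stream);
        t->bufptr = t->bufstart;
    }

    static void
    sink_putc(Sink *t, int c)
    {
        if (t->bufend != NULL && t->bufptr >= t->bufend)
        {
            if (t->stream == NULL)
                return;             /* buffer full and nowhere to flush: drop */
            sink_flush(t);
        }
        *t->bufptr++ = (char) c;
    }

    int
    main(void)
    {
        char        scratch[8];
        Sink        t = {scratch, scratch, scratch + sizeof(scratch), stdout};
        const char *msg = "hello, sink\n";

        for (; *msg; msg++)
            sink_putc(&t, *msg);
        sink_flush(&t);
        return 0;
    }
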
-static void flushbuffer(PrintfTarget *target);
-static int dopr(PrintfTarget *target, const char *format, va_list args);
+static void flushbuffer(PrintfTarget * target);
+static int dopr(PrintfTarget * target, const char *format, va_list args);
int
/* call this only when stream is defined */
static void
-flushbuffer(PrintfTarget *target)
+flushbuffer(PrintfTarget * target)
{
size_t nc = target->bufptr - target->bufstart;
static void fmtstr(char *value, int leftjust, int minlen, int maxwidth,
- int pointflag, PrintfTarget *target);
-static void fmtptr(void *value, PrintfTarget *target);
+ int pointflag, PrintfTarget * target);
+static void fmtptr(void *value, PrintfTarget * target);
static void fmtint(int64 value, char type, int forcesign,
int leftjust, int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target);
-static void fmtchar(int value, int leftjust, int minlen, PrintfTarget *target);
+ PrintfTarget * target);
+static void fmtchar(int value, int leftjust, int minlen, PrintfTarget * target);
static void fmtfloat(double value, char type, int forcesign,
int leftjust, int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target);
-static void dostr(const char *str, int slen, PrintfTarget *target);
-static void dopr_outch(int c, PrintfTarget *target);
+ PrintfTarget * target);
+static void dostr(const char *str, int slen, PrintfTarget * target);
+static void dopr_outch(int c, PrintfTarget * target);
static int adjust_sign(int is_negative, int forcesign, int *signvalue);
static void adjust_padlen(int minlen, int vallen, int leftjust, int *padlen);
static void leading_pad(int zpad, int *signvalue, int *padlen,
- PrintfTarget *target);
-static void trailing_pad(int *padlen, PrintfTarget *target);
+ PrintfTarget * target);
+static void trailing_pad(int *padlen, PrintfTarget * target);
/*
* dopr(): poor man's version of doprintf
*/
static int
-dopr(PrintfTarget *target, const char *format, va_list args)
+dopr(PrintfTarget * target, const char *format, va_list args)
{
const char *format_start = format;
int ch;
static void
fmtstr(char *value, int leftjust, int minlen, int maxwidth,
- int pointflag, PrintfTarget *target)
+ int pointflag, PrintfTarget * target)
{
int padlen,
vallen; /* amount to pad */
}
static void
-fmtptr(void *value, PrintfTarget *target)
+fmtptr(void *value, PrintfTarget * target)
{
int vallen;
char convert[64];
static void
fmtint(int64 value, char type, int forcesign, int leftjust,
int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target)
+ PrintfTarget * target)
{
uint64 base;
int dosign;
}
static void
-fmtchar(int value, int leftjust, int minlen, PrintfTarget *target)
+fmtchar(int value, int leftjust, int minlen, PrintfTarget * target)
{
int padlen = 0; /* amount to pad */
static void
fmtfloat(double value, char type, int forcesign, int leftjust,
int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target)
+ PrintfTarget * target)
{
int signvalue = 0;
int vallen;
}
static void
-dostr(const char *str, int slen, PrintfTarget *target)
+dostr(const char *str, int slen, PrintfTarget * target)
{
while (slen > 0)
{
}
static void
-dopr_outch(int c, PrintfTarget *target)
+dopr_outch(int c, PrintfTarget * target)
{
if (target->bufend != NULL && target->bufptr >= target->bufend)
{
static void
-leading_pad(int zpad, int *signvalue, int *padlen, PrintfTarget *target)
+leading_pad(int zpad, int *signvalue, int *padlen, PrintfTarget * target)
{
if (*padlen > 0 && zpad)
{
static void
-trailing_pad(int *padlen, PrintfTarget *target)
+trailing_pad(int *padlen, PrintfTarget * target)
{
while (*padlen < 0)
{
/*
- * $PostgreSQL: pgsql/src/test/examples/testlibpq2.c,v 1.16 2009/12/31 00:16:47 adunstan Exp $
+ * $PostgreSQL: pgsql/src/test/examples/testlibpq2.c,v 1.17 2010/07/06 19:19:01 momjian Exp $
*
*
* testlibpq2.c
break; /* shouldn't happen */
FD_ZERO(&input_mask);
- FD_SET (sock, &input_mask);
+ FD_SET(sock, &input_mask);
if (select(sock + 1, &input_mask, NULL, NULL, NULL) < 0)
{
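
For context, the loop touched here blocks in select() on the libpq socket and then drains notifications with PQconsumeInput()/PQnotifies(). A condensed sketch of that wait-and-consume cycle (hypothetical channel name "myevent"; connection parameters taken from the environment):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/select.h>
    #include "libpq-fe.h"

    static void
    wait_for_notify(PGconn *conn)
    {
        int         sock = PQsocket(conn);
        fd_set      input_mask;
        PGnotify   *notify;

        for (;;)
        {
            FD_ZERO(&input_mask);
            FD_SET(sock, &input_mask);

            if (select(sock + 1, &input_mask, NULL, NULL, NULL) < 0)
            {
                perror("select");
                return;
            }

            PQconsumeInput(conn);   /* pull whatever arrived off the socket */
            while ((notify = PQnotifies(conn)) != NULL)
            {
                printf("NOTIFY \"%s\" received from backend PID %d\n",
                       notify->relname, notify->be_pid);
                PQfreemem(notify);
                return;             /* one notification is enough for this sketch */
            }
        }
    }

    int
    main(void)
    {
        PGconn     *conn = PQconnectdb("");     /* parameters from the environment */
        PGresult   *res;

        if (PQstatus(conn) != CONNECTION_OK)
        {
            fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
            PQfinish(conn);
            return EXIT_FAILURE;
        }
        res = PQexec(conn, "LISTEN myevent");
        PQclear(res);
        wait_for_notify(conn);
        PQfinish(conn);
        return EXIT_SUCCESS;
    }
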
* Portions Copyright (c) 1996-2010, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.73 2010/05/20 14:13:11 mha Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.74 2010/07/06 19:19:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(errmsg("could not determine system time zone"),
errdetail("The PostgreSQL time zone will be set to \"%s\".",
"GMT"),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return NULL; /* go to GMT */
}
(errmsg("could not recognize system time zone"),
errdetail("The PostgreSQL time zone will be set to \"%s\".",
resultbuf),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return resultbuf;
}
if (!tm)
{
ereport(LOG,
- (errmsg("could not identify system time zone: localtime() failed"),
- errdetail("The PostgreSQL time zone will be set to \"%s\".",
- "GMT"),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ (errmsg("could not identify system time zone: localtime() failed"),
+ errdetail("The PostgreSQL time zone will be set to \"%s\".",
+ "GMT"),
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return NULL; /* go to GMT */
}
(int) GetLastError()),
errdetail("The PostgreSQL time zone will be set to \"%s\".",
"GMT"),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return NULL; /* go to GMT */
}
(errmsg_internal("could not query value for key \"std\" to identify system time zone \"%s\": %i",
keyname, (int) r)));
RegCloseKey(key);
- continue; /* Proceed to look at the next timezone */
+ continue; /* Proceed to look at the next timezone */
}
if (strcmp(tzname, zonename) == 0)
{
(errmsg_internal("could not query value for key \"dlt\" to identify system time zone \"%s\": %i",
keyname, (int) r)));
RegCloseKey(key);
- continue; /* Proceed to look at the next timezone */
+ continue; /* Proceed to look at the next timezone */
}
if (strcmp(tzname, zonename) == 0)
{
tzname),
errdetail("The PostgreSQL time zone will be set to \"%s\".",
"GMT"),
- errhint("You can specify the correct timezone in postgresql.conf.")));
- return NULL; /* go to GMT */
+ errhint("You can specify the correct timezone in postgresql.conf.")));
+ return NULL; /* go to GMT */
}
#endif /* WIN32 */
/* Step into the subdirectory */
if (dir->depth >= MAX_TZDIR_DEPTH - 1)
ereport(ERROR,
- (errmsg_internal("timezone directory stack overflow")));
+ (errmsg_internal("timezone directory stack overflow")));
dir->depth++;
dir->dirname[dir->depth] = pstrdup(fullname);
dir->dirdesc[dir->depth] = AllocateDir(fullname);
/*
- * $PostgreSQL: pgsql/src/tools/fsync/test_fsync.c,v 1.29 2010/07/04 13:42:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/tools/fsync/test_fsync.c,v 1.30 2010/07/06 19:19:02 momjian Exp $
*
*
* test_fsync.c
#define LABEL_FORMAT "\t%-30s"
-int loops = 10000;
+int loops = 10000;
void die(char *str);
void print_elapse(struct timeval start_t, struct timeval stop_t);
* Fsync another file descriptor?
*/
printf("\nTest if fsync on non-write file descriptor is honored:\n");
- printf("(If the times are similar, fsync() can sync data written\n");
+ printf("(If the times are similar, fsync() can sync data written\n");
printf("on a different descriptor.)\n");
/* write, fsync, close */
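
The test described just above opens the same file through two descriptors, writes through one, and fsync()s the other; if that fsync takes about as long as a normal one, the platform flushes data written on a different descriptor. A bare-bones POSIX probe of the same idea (hypothetical scratch file, no timing loop):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int
    main(void)
    {
        const char *path = "fsync_probe.tmp";   /* hypothetical scratch file */
        int         wfd = open(path, O_RDWR | O_CREAT, 0600);
        int         ofd = open(path, O_RDWR);   /* second descriptor, same file */
        char        buf[8192] = {0};

        if (wfd < 0 || ofd < 0)
        {
            perror("open");
            return EXIT_FAILURE;
        }

        if (write(wfd, buf, sizeof(buf)) != sizeof(buf))
        {
            perror("write");
            return EXIT_FAILURE;
        }

        /* fsync the descriptor we did NOT write through */
        if (fsync(ofd) != 0)
            perror("fsync");

        close(ofd);
        close(wfd);
        unlink(path);
        return EXIT_SUCCESS;
    }
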
void
print_elapse(struct timeval start_t, struct timeval stop_t)
{
- double total_time = (stop_t.tv_sec - start_t.tv_sec) +
+ double total_time = (stop_t.tv_sec - start_t.tv_sec) +
/* usec subtraction might be negative, e.g. 5.4 - 4.8 */
- (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
- double per_second = loops / total_time;
-
+ (stop_t.tv_usec - start_t.tv_usec) * 0.000001;
+ double per_second = loops / total_time;
+
printf("%9.3f/second\n", per_second);
}
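
The usec term in print_elapse() can indeed come out negative, as the comment notes: with start = {4 s, 800000 us} and stop = {5 s, 400000 us} the expression gives (5 - 4) + (400000 - 800000) * 0.000001 = 0.6 s, with no borrow handling needed. A tiny check of that arithmetic (the loops value is hypothetical):

    #include <stdio.h>
    #include <sys/time.h>

    int
    main(void)
    {
        struct timeval start_t = {4, 800000};
        struct timeval stop_t = {5, 400000};
        int         loops = 10000;              /* hypothetical iteration count */

        double      total_time = (stop_t.tv_sec - start_t.tv_sec) +
                                 (stop_t.tv_usec - start_t.tv_usec) * 0.000001;

        printf("%.3f seconds elapsed, %9.3f/second\n",
               total_time, loops / total_time);
        return 0;
    }
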