* - Add a pgstat config column to pg_database, so this
* entire thing can be enabled/disabled on a per db basis.
*
- * Copyright (c) 2001-2016, PostgreSQL Global Development Group
+ * Copyright (c) 2001-2019, PostgreSQL Global Development Group
*
* src/backend/postmaster/pgstat.c
* ----------
#include <arpa/inet.h>
#include <signal.h>
#include <time.h>
+#ifdef HAVE_SYS_SELECT_H
+#include <sys/select.h>
+#endif
#include "pgstat.h"
#include "access/heapam.h"
#include "access/htup_details.h"
+#include "access/tableam.h"
#include "access/transam.h"
#include "access/twophase_rmgr.h"
#include "access/xact.h"
#include "catalog/pg_database.h"
#include "catalog/pg_proc.h"
-#include "lib/ilist.h"
-#include "libpq/ip.h"
+#include "common/ip.h"
#include "libpq/libpq.h"
#include "libpq/pqsignal.h"
#include "mb/pg_wchar.h"
#include "postmaster/autovacuum.h"
#include "postmaster/fork_process.h"
#include "postmaster/postmaster.h"
+#include "replication/walsender.h"
#include "storage/backendid.h"
#include "storage/dsm.h"
#include "storage/fd.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
#include "utils/timestamp.h"
-#include "utils/tqual.h"
/* ----------
* Timer definitions.
* ----------
*/
-#define PGSTAT_STAT_INTERVAL 500 /* Minimum time between stats file
- * updates; in milliseconds. */
+#define PGSTAT_STAT_INTERVAL 500 /* Minimum time between stats file
+ * updates; in milliseconds. */
-#define PGSTAT_RETRY_DELAY 10 /* How long to wait between checks for
- * a new file; in milliseconds. */
+#define PGSTAT_RETRY_DELAY 10 /* How long to wait between checks for a
+ * new file; in milliseconds. */
#define PGSTAT_MAX_WAIT_TIME 10000 /* Maximum time to wait for a stats
* file update; in milliseconds. */
-#define PGSTAT_INQ_INTERVAL 640 /* How often to ping the collector for
- * a new file; in milliseconds. */
+#define PGSTAT_INQ_INTERVAL 640 /* How often to ping the collector for a
+ * new file; in milliseconds. */
-#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
- * failed statistics collector; in
- * seconds. */
+#define PGSTAT_RESTART_INTERVAL 60 /* How often to attempt to restart a
+ * failed statistics collector; in
+ * seconds. */
#define PGSTAT_POLL_LOOP_COUNT (PGSTAT_MAX_WAIT_TIME / PGSTAT_RETRY_DELAY)
#define PGSTAT_INQ_LOOP_COUNT (PGSTAT_INQ_INTERVAL / PGSTAT_RETRY_DELAY)
+/* Minimum receive buffer size for the collector's socket. */
+#define PGSTAT_MIN_RCVBUF (100 * 1024)
+
/* ----------
* The initial size hints for the hash tables used in the collector.
#define PGSTAT_FUNCTION_HASH_SIZE 512
+/* ----------
+ * Total number of backends including auxiliary
+ *
+ * We reserve a slot for each possible BackendId, plus one for each
+ * possible auxiliary process type. (This scheme assumes there is not
+ * more than one of any auxiliary process type at a time.) MaxBackends
+ * includes autovacuum workers and background workers as well.
+ * ----------
+ */
+#define NumBackendStatSlots (MaxBackends + NUM_AUXPROCTYPES)
+
+
/* ----------
* GUC parameters
* ----------
static TabStatusArray *pgStatTabList = NULL;
+/*
+ * pgStatTabHash entry: map from relation OID to PgStat_TableStatus pointer
+ */
+typedef struct TabStatHashEntry
+{
+ Oid t_id;
+ PgStat_TableStatus *tsa_entry;
+} TabStatHashEntry;
+
+/*
+ * Hash table for O(1) t_id -> tsa_entry lookup
+ */
+static HTAB *pgStatTabHash = NULL;
+
/*
* Backends store per-function info that's waiting to be sent to the collector
* in this hash table (indexed by function OID).
{
int nest_level; /* subtransaction nest level */
struct PgStat_SubXactStatus *prev; /* higher-level subxact if any */
- PgStat_TableXactStatus *first; /* head of list for this subxact */
+ PgStat_TableXactStatus *first; /* head of list for this subxact */
} PgStat_SubXactStatus;
static PgStat_SubXactStatus *pgStatXactStack = NULL;
/* Record that's written to 2PC state file when pgstat state is persisted */
typedef struct TwoPhasePgStatRecord
{
- PgStat_Counter tuples_inserted; /* tuples inserted in xact */
- PgStat_Counter tuples_updated; /* tuples updated in xact */
- PgStat_Counter tuples_deleted; /* tuples deleted in xact */
+ PgStat_Counter tuples_inserted; /* tuples inserted in xact */
+ PgStat_Counter tuples_updated; /* tuples updated in xact */
+ PgStat_Counter tuples_deleted; /* tuples deleted in xact */
PgStat_Counter inserted_pre_trunc; /* tuples inserted prior to truncate */
PgStat_Counter updated_pre_trunc; /* tuples updated prior to truncate */
PgStat_Counter deleted_pre_trunc; /* tuples deleted prior to truncate */
*/
static MemoryContext pgStatLocalContext = NULL;
static HTAB *pgStatDBHash = NULL;
+
+/* Status for backends including auxiliary */
static LocalPgBackendStatus *localBackendStatusTable = NULL;
+
+/* Total number of backends including auxiliary */
static int localNumBackends = 0;
/*
static PgStat_ArchiverStats archiverStats;
static PgStat_GlobalStats globalStats;
-/* Write request info for each database */
-typedef struct DBWriteRequest
-{
- Oid databaseid; /* OID of the database to write */
- TimestampTz request_time; /* timestamp of the last write request */
- slist_node next;
-} DBWriteRequest;
-
-/* Latest statistics request times from backends */
-static slist_head last_statrequests = SLIST_STATIC_INIT(last_statrequests);
+/*
+ * List of OIDs of databases we need to write out. If an entry is InvalidOid,
+ * it means to write only the shared-catalog stats ("DB 0"); otherwise, we
+ * will write both that DB's data and the shared stats.
+ */
+static List *pending_write_requests = NIL;
+/* Signal handler flags */
static volatile bool need_exit = false;
static volatile bool got_SIGHUP = false;
static void pgstat_send_tabstat(PgStat_MsgTabstat *tsmsg);
static void pgstat_send_funcstats(void);
-static HTAB *pgstat_collect_oids(Oid catalogid);
+static HTAB *pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid);
static PgStat_TableStatus *get_tabstat_entry(Oid rel_id, bool isshared);
static void pgstat_setup_memcxt(void);
+static const char *pgstat_get_wait_activity(WaitEventActivity w);
+static const char *pgstat_get_wait_client(WaitEventClient w);
+static const char *pgstat_get_wait_ipc(WaitEventIPC w);
+static const char *pgstat_get_wait_timeout(WaitEventTimeout w);
+static const char *pgstat_get_wait_io(WaitEventIO w);
+
static void pgstat_setheader(PgStat_MsgHdr *hdr, StatMsgType mtype);
static void pgstat_send(void *msg, int len);
static void pgstat_recv_funcpurge(PgStat_MsgFuncpurge *msg, int len);
static void pgstat_recv_recoveryconflict(PgStat_MsgRecoveryConflict *msg, int len);
static void pgstat_recv_deadlock(PgStat_MsgDeadlock *msg, int len);
+static void pgstat_recv_checksum_failure(PgStat_MsgChecksumFailure *msg, int len);
static void pgstat_recv_tempfile(PgStat_MsgTempFile *msg, int len);
/* ------------------------------------------------------------
* compile-time cross-check that we didn't.
*/
StaticAssertStmt(sizeof(PgStat_Msg) <= PGSTAT_MAX_MSG_SIZE,
- "maximum stats message size exceeds PGSTAT_MAX_MSG_SIZE");
+ "maximum stats message size exceeds PGSTAT_MAX_MSG_SIZE");
/*
* Create the UDP socket for sending and receiving statistic messages
if (++tries > 1)
ereport(LOG,
- (errmsg("trying another address for the statistics collector")));
+ (errmsg("trying another address for the statistics collector")));
/*
* Create the socket.
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not create socket for statistics collector: %m")));
+ errmsg("could not create socket for statistics collector: %m")));
continue;
}
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not bind socket for statistics collector: %m")));
+ errmsg("could not bind socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = PGINVALID_SOCKET;
continue;
}
alen = sizeof(pgStatAddr);
- if (getsockname(pgStatSock, (struct sockaddr *) & pgStatAddr, &alen) < 0)
+ if (getsockname(pgStatSock, (struct sockaddr *) &pgStatAddr, &alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
* provides a kernel-level check that only packets from this same
* address will be received.
*/
- if (connect(pgStatSock, (struct sockaddr *) & pgStatAddr, alen) < 0)
+ if (connect(pgStatSock, (struct sockaddr *) &pgStatAddr, alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not connect socket for statistics collector: %m")));
+ errmsg("could not connect socket for statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = PGINVALID_SOCKET;
continue;
goto startup_failed;
}
+ /*
+ * Try to ensure that the socket's receive buffer is at least
+ * PGSTAT_MIN_RCVBUF bytes, so that it won't easily overflow and lose
+ * data. Use of UDP protocol means that we are willing to lose data under
+ * heavy load, but we don't want it to happen just because of ridiculously
+ * small default buffer sizes (such as 8KB on older Windows versions).
+ */
+ {
+ int old_rcvbuf;
+ int new_rcvbuf;
+ ACCEPT_TYPE_ARG3 rcvbufsize = sizeof(old_rcvbuf);
+
+ if (getsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
+ (char *) &old_rcvbuf, &rcvbufsize) < 0)
+ {
+ elog(LOG, "getsockopt(SO_RCVBUF) failed: %m");
+ /* if we can't get existing size, always try to set it */
+ old_rcvbuf = 0;
+ }
+
+ new_rcvbuf = PGSTAT_MIN_RCVBUF;
+ if (old_rcvbuf < new_rcvbuf)
+ {
+ if (setsockopt(pgStatSock, SOL_SOCKET, SO_RCVBUF,
+ (char *) &new_rcvbuf, sizeof(new_rcvbuf)) < 0)
+ elog(LOG, "setsockopt(SO_RCVBUF) failed: %m");
+ }
+ }
+
pg_freeaddrinfo_all(hints.ai_family, addrs);
return;
startup_failed:
ereport(LOG,
- (errmsg("disabling statistics collector for lack of working socket")));
+ (errmsg("disabling statistics collector for lack of working socket")));
if (addrs)
pg_freeaddrinfo_all(hints.ai_family, addrs);
{
DIR *dir;
struct dirent *entry;
- char fname[MAXPGPATH];
+ char fname[MAXPGPATH * 2];
dir = AllocateDir(directory);
while ((entry = ReadDir(dir, directory)) != NULL)
strcmp(entry->d_name + nchars, "stat") != 0)
continue;
- snprintf(fname, MAXPGPATH, "%s/%s", directory,
+ snprintf(fname, sizeof(fname), "%s/%s", directory,
entry->d_name);
unlink(fname);
}
return postmaster_forkexec(ac, av);
}
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
/* ----------
* pgstat_report_stat() -
*
- * Called from tcop/postgres.c to send the so far collected per-table
- * and function usage statistics to the collector. Note that this is
- * called only when not within a transaction, so it is fair to use
+ * Must be called by processes that perform DML: tcop/postgres.c, logical
+ * receiver processes, SPI worker, etc. to send the so far collected
+ * per-table and function usage statistics to the collector. Note that this
+ * is called only when not within a transaction, so it is fair to use
* transaction stop time as an approximation of current time.
* ----------
*/
return;
last_report = now;
+ /*
+ * Destroy pgStatTabHash before we start invalidating PgStat_TableStatus
+ * entries it points to. (Should we fail partway through the loop below,
+ * it's okay to have removed the hashtable already --- the only
+ * consequence is we'd get multiple entries for the same table in the
+ * pgStatTabList, and that's safe.)
+ */
+ if (pgStatTabHash)
+ hash_destroy(pgStatTabHash);
+ pgStatTabHash = NULL;
+
/*
* Scan through the TabStatusArray struct(s) to find tables that actually
* have counts, and build messages to send. We have to separate shared
/*
* Read pg_database and make a list of OIDs of all existing databases
*/
- htab = pgstat_collect_oids(DatabaseRelationId);
+ htab = pgstat_collect_oids(DatabaseRelationId, Anum_pg_database_oid);
/*
* Search the database hash table for dead databases and tell the
/*
* Similarly to above, make a list of all known relations in this DB.
*/
- htab = pgstat_collect_oids(RelationRelationId);
+ htab = pgstat_collect_oids(RelationRelationId, Anum_pg_class_oid);
/*
* Initialize our messages table counter to zero
if (msg.m_nentries >= PGSTAT_NUM_TABPURGE)
{
len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
- +msg.m_nentries * sizeof(Oid);
+ + msg.m_nentries * sizeof(Oid);
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TABPURGE);
msg.m_databaseid = MyDatabaseId;
if (msg.m_nentries > 0)
{
len = offsetof(PgStat_MsgTabpurge, m_tableid[0])
- +msg.m_nentries * sizeof(Oid);
+ + msg.m_nentries * sizeof(Oid);
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TABPURGE);
msg.m_databaseid = MyDatabaseId;
if (dbentry->functions != NULL &&
hash_get_num_entries(dbentry->functions) > 0)
{
- htab = pgstat_collect_oids(ProcedureRelationId);
+ htab = pgstat_collect_oids(ProcedureRelationId, Anum_pg_proc_oid);
pgstat_setheader(&f_msg.m_hdr, PGSTAT_MTYPE_FUNCPURGE);
f_msg.m_databaseid = MyDatabaseId;
if (f_msg.m_nentries >= PGSTAT_NUM_FUNCPURGE)
{
len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
- +f_msg.m_nentries * sizeof(Oid);
+ + f_msg.m_nentries * sizeof(Oid);
pgstat_send(&f_msg, len);
if (f_msg.m_nentries > 0)
{
len = offsetof(PgStat_MsgFuncpurge, m_functionid[0])
- +f_msg.m_nentries * sizeof(Oid);
+ + f_msg.m_nentries * sizeof(Oid);
pgstat_send(&f_msg, len);
}
* ----------
*/
static HTAB *
-pgstat_collect_oids(Oid catalogid)
+pgstat_collect_oids(Oid catalogid, AttrNumber anum_oid)
{
HTAB *htab;
HASHCTL hash_ctl;
Relation rel;
- HeapScanDesc scan;
+ TableScanDesc scan;
HeapTuple tup;
Snapshot snapshot;
&hash_ctl,
HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
- rel = heap_open(catalogid, AccessShareLock);
+ rel = table_open(catalogid, AccessShareLock);
snapshot = RegisterSnapshot(GetLatestSnapshot());
- scan = heap_beginscan(rel, snapshot, 0, NULL);
+ scan = table_beginscan(rel, snapshot, 0, NULL);
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid thisoid = HeapTupleGetOid(tup);
+ Oid thisoid;
+ bool isnull;
+
+ thisoid = heap_getattr(tup, anum_oid, RelationGetDescr(rel), &isnull);
+ Assert(!isnull);
CHECK_FOR_INTERRUPTS();
(void) hash_search(htab, (void *) &thisoid, HASH_ENTER, NULL);
}
- heap_endscan(scan);
+ table_endscan(scan);
UnregisterSnapshot(snapshot);
- heap_close(rel, AccessShareLock);
+ table_close(rel, AccessShareLock);
return htab;
}
msg.m_tableid[0] = relid;
msg.m_nentries = 1;
- len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) +sizeof(Oid);
+ len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) + sizeof(Oid);
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TABPURGE);
msg.m_databaseid = MyDatabaseId;
pgstat_send(&msg, len);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/* ----------
* pgstat_report_analyze() -
*
* Tell the collector about the table we just analyzed.
+ *
+ * Caller must provide new live- and dead-tuples estimates, as well as a
+ * flag indicating whether to reset the changes_since_analyze counter.
* --------
*/
void
pgstat_report_analyze(Relation rel,
- PgStat_Counter livetuples, PgStat_Counter deadtuples)
+ PgStat_Counter livetuples, PgStat_Counter deadtuples,
+ bool resetcounter)
{
PgStat_MsgAnalyze msg;
msg.m_databaseid = rel->rd_rel->relisshared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = RelationGetRelid(rel);
msg.m_autovacuum = IsAutoVacuumWorkerProcess();
+ msg.m_resetcounter = resetcounter;
msg.m_analyzetime = GetCurrentTimestamp();
msg.m_live_tuples = livetuples;
msg.m_dead_tuples = deadtuples;
pgstat_send(&msg, sizeof(msg));
}
+
+
+/* --------
+ * pgstat_report_checksum_failures_in_db(dboid, failurecount) -
+ *
+ * Tell the collector that "failurecount" checksum failures were detected
+ * in database "dboid", via a PGSTAT_MTYPE_CHECKSUMFAILURE message.
+ * Silently does nothing when the collector's socket is unavailable or
+ * when stats counting is disabled.
+ * --------
+ */
+void
+pgstat_report_checksum_failures_in_db(Oid dboid, int failurecount)
+{
+ PgStat_MsgChecksumFailure msg;
+
+ /* No collector socket, or counts not being tracked: nothing to do */
+ if (pgStatSock == PGINVALID_SOCKET || !pgstat_track_counts)
+ return;
+
+ pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_CHECKSUMFAILURE);
+ msg.m_databaseid = dboid;
+ msg.m_failurecount = failurecount;
+ pgstat_send(&msg, sizeof(msg));
+}
+
+/* --------
+ * pgstat_report_checksum_failure() -
+ *
+ * Tell the collector about a single checksum failure in the current
+ * database (MyDatabaseId). Convenience wrapper around
+ * pgstat_report_checksum_failures_in_db().
+ * --------
+ */
+void
+pgstat_report_checksum_failure(void)
+{
+ pgstat_report_checksum_failures_in_db(MyDatabaseId, 1);
+}
+
/* --------
* pgstat_report_tempfile() -
*
* Called by the executor before invoking a function.
*/
void
-pgstat_init_function_usage(FunctionCallInfoData *fcinfo,
+pgstat_init_function_usage(FunctionCallInfo fcinfo,
PgStat_FunctionCallUsage *fcu)
{
PgStat_BackendFunctionEntry *htabent;
static PgStat_TableStatus *
get_tabstat_entry(Oid rel_id, bool isshared)
{
+ TabStatHashEntry *hash_entry;
PgStat_TableStatus *entry;
TabStatusArray *tsa;
- TabStatusArray *prev_tsa;
- int i;
+ bool found;
/*
- * Search the already-used tabstat slots for this relation.
+ * Create hash table if we don't have it already.
*/
- prev_tsa = NULL;
- for (tsa = pgStatTabList; tsa != NULL; prev_tsa = tsa, tsa = tsa->tsa_next)
+ if (pgStatTabHash == NULL)
{
- for (i = 0; i < tsa->tsa_used; i++)
- {
- entry = &tsa->tsa_entries[i];
- if (entry->t_id == rel_id)
- return entry;
- }
+ HASHCTL ctl;
- if (tsa->tsa_used < TABSTAT_QUANTUM)
- {
- /*
- * It must not be present, but we found a free slot instead. Fine,
- * let's use this one. We assume the entry was already zeroed,
- * either at creation or after last use.
- */
- entry = &tsa->tsa_entries[tsa->tsa_used++];
- entry->t_id = rel_id;
- entry->t_shared = isshared;
- return entry;
- }
+ memset(&ctl, 0, sizeof(ctl));
+ ctl.keysize = sizeof(Oid);
+ ctl.entrysize = sizeof(TabStatHashEntry);
+
+ pgStatTabHash = hash_create("pgstat TabStatusArray lookup hash table",
+ TABSTAT_QUANTUM,
+ &ctl,
+ HASH_ELEM | HASH_BLOBS);
}
/*
- * We ran out of tabstat slots, so allocate more. Be sure they're zeroed.
+ * Find an entry or create a new one.
*/
- tsa = (TabStatusArray *) MemoryContextAllocZero(TopMemoryContext,
- sizeof(TabStatusArray));
- if (prev_tsa)
- prev_tsa->tsa_next = tsa;
- else
- pgStatTabList = tsa;
+ hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_ENTER, &found);
+ if (!found)
+ {
+ /* initialize new entry with null pointer */
+ hash_entry->tsa_entry = NULL;
+ }
+
+ /*
+ * If entry is already valid, we're done.
+ */
+ if (hash_entry->tsa_entry)
+ return hash_entry->tsa_entry;
+
+ /*
+ * Locate the first pgStatTabList entry with free space, making a new list
+ * entry if needed. Note that we could get an OOM failure here, but if so
+ * we have left the hashtable and the list in a consistent state.
+ */
+ if (pgStatTabList == NULL)
+ {
+ /* Set up first pgStatTabList entry */
+ pgStatTabList = (TabStatusArray *)
+ MemoryContextAllocZero(TopMemoryContext,
+ sizeof(TabStatusArray));
+ }
+
+ tsa = pgStatTabList;
+ while (tsa->tsa_used >= TABSTAT_QUANTUM)
+ {
+ if (tsa->tsa_next == NULL)
+ tsa->tsa_next = (TabStatusArray *)
+ MemoryContextAllocZero(TopMemoryContext,
+ sizeof(TabStatusArray));
+ tsa = tsa->tsa_next;
+ }
/*
- * Use the first entry of the new TabStatusArray.
+ * Allocate a PgStat_TableStatus entry within this list entry. We assume
+ * the entry was already zeroed, either at creation or after last use.
*/
entry = &tsa->tsa_entries[tsa->tsa_used++];
entry->t_id = rel_id;
entry->t_shared = isshared;
+
+ /*
+ * Now we can fill the entry in pgStatTabHash.
+ */
+ hash_entry->tsa_entry = entry;
+
return entry;
}
* find_tabstat_entry - find any existing PgStat_TableStatus entry for rel
*
* If no entry, return NULL, don't create a new one
+ *
+ * Note: if we got an error in the most recent execution of pgstat_report_stat,
+ * it's possible that an entry exists but there's no hashtable entry for it.
+ * That's okay, we'll treat this case as "doesn't exist".
*/
PgStat_TableStatus *
find_tabstat_entry(Oid rel_id)
{
- PgStat_TableStatus *entry;
- TabStatusArray *tsa;
- int i;
+ TabStatHashEntry *hash_entry;
- for (tsa = pgStatTabList; tsa != NULL; tsa = tsa->tsa_next)
- {
- for (i = 0; i < tsa->tsa_used; i++)
- {
- entry = &tsa->tsa_entries[i];
- if (entry->t_id == rel_id)
- return entry;
- }
- }
+ /* If hashtable doesn't exist, there are no entries at all */
+ if (!pgStatTabHash)
+ return NULL;
- /* Not present */
- return NULL;
+ hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
+ if (!hash_entry)
+ return NULL;
+
+ /* Note that this step could also return NULL, but that's correct */
+ return hash_entry->tsa_entry;
}
/*
* pgstat_count_heap_insert - count a tuple insertion of n tuples
*/
void
-pgstat_count_heap_insert(Relation rel, int n)
+pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
{
PgStat_TableStatus *pgstat_info = rel->pgstat_info;
* ----------
*/
void
-AtEOXact_PgStat(bool isCommit)
+AtEOXact_PgStat(bool isCommit, bool parallel)
{
PgStat_SubXactStatus *xact_state;
- /*
- * Count transaction commit or abort. (We use counters, not just bools,
- * in case the reporting message isn't sent right away.)
- */
- if (isCommit)
- pgStatXactCommit++;
- else
- pgStatXactRollback++;
+ /* Don't count parallel worker transaction stats */
+ if (!parallel)
+ {
+ /*
+ * Count transaction commit or abort. (We use counters, not just
+ * bools, in case the reporting message isn't sent right away.)
+ */
+ if (isCommit)
+ pgStatXactCommit++;
+ else
+ pgStatXactRollback++;
+ }
/*
* Transfer transactional insert/update counts into the base tabstat
pgstat_info->t_counts.t_tuples_updated += rec->tuples_updated;
pgstat_info->t_counts.t_tuples_deleted += rec->tuples_deleted;
pgstat_info->t_counts.t_truncated = rec->t_truncated;
-
+ if (rec->t_truncated)
+ {
+ /* forget live/dead stats seen by backend thus far */
+ pgstat_info->t_counts.t_delta_live_tuples = 0;
+ pgstat_info->t_counts.t_delta_dead_tuples = 0;
+ }
pgstat_info->t_counts.t_delta_live_tuples +=
rec->tuples_inserted - rec->tuples_deleted;
pgstat_info->t_counts.t_delta_dead_tuples +=
/* ----------
* pgstat_fetch_stat_local_beentry() -
*
- * Like pgstat_fetch_stat_beentry() but with locally computed addtions (like
+ * Like pgstat_fetch_stat_beentry() but with locally computed additions (like
* xid and xmin values of the backend)
*
* NB: caller is responsible for a check if the user is permitted to see
#ifdef USE_SSL
static PgBackendSSLStatus *BackendSslStatusBuffer = NULL;
#endif
+#ifdef ENABLE_GSS
+static PgBackendGSSStatus *BackendGssStatusBuffer = NULL;
+#endif
/*
Size size;
/* BackendStatusArray: */
- size = mul_size(sizeof(PgBackendStatus), MaxBackends);
+ size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
/* BackendAppnameBuffer: */
size = add_size(size,
- mul_size(NAMEDATALEN, MaxBackends));
+ mul_size(NAMEDATALEN, NumBackendStatSlots));
/* BackendClientHostnameBuffer: */
size = add_size(size,
- mul_size(NAMEDATALEN, MaxBackends));
+ mul_size(NAMEDATALEN, NumBackendStatSlots));
/* BackendActivityBuffer: */
size = add_size(size,
- mul_size(pgstat_track_activity_query_size, MaxBackends));
+ mul_size(pgstat_track_activity_query_size, NumBackendStatSlots));
#ifdef USE_SSL
/* BackendSslStatusBuffer: */
size = add_size(size,
- mul_size(sizeof(PgBackendSSLStatus), MaxBackends));
+ mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots));
#endif
return size;
}
char *buffer;
/* Create or attach to the shared array */
- size = mul_size(sizeof(PgBackendStatus), MaxBackends);
+ size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
BackendStatusArray = (PgBackendStatus *)
ShmemInitStruct("Backend Status Array", size, &found);
}
/* Create or attach to the shared appname buffer */
- size = mul_size(NAMEDATALEN, MaxBackends);
+ size = mul_size(NAMEDATALEN, NumBackendStatSlots);
BackendAppnameBuffer = (char *)
ShmemInitStruct("Backend Application Name Buffer", size, &found);
/* Initialize st_appname pointers. */
buffer = BackendAppnameBuffer;
- for (i = 0; i < MaxBackends; i++)
+ for (i = 0; i < NumBackendStatSlots; i++)
{
BackendStatusArray[i].st_appname = buffer;
buffer += NAMEDATALEN;
}
/* Create or attach to the shared client hostname buffer */
- size = mul_size(NAMEDATALEN, MaxBackends);
+ size = mul_size(NAMEDATALEN, NumBackendStatSlots);
BackendClientHostnameBuffer = (char *)
ShmemInitStruct("Backend Client Host Name Buffer", size, &found);
/* Initialize st_clienthostname pointers. */
buffer = BackendClientHostnameBuffer;
- for (i = 0; i < MaxBackends; i++)
+ for (i = 0; i < NumBackendStatSlots; i++)
{
BackendStatusArray[i].st_clienthostname = buffer;
buffer += NAMEDATALEN;
/* Create or attach to the shared activity buffer */
BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size,
- MaxBackends);
+ NumBackendStatSlots);
BackendActivityBuffer = (char *)
ShmemInitStruct("Backend Activity Buffer",
BackendActivityBufferSize,
if (!found)
{
- MemSet(BackendActivityBuffer, 0, size);
+ MemSet(BackendActivityBuffer, 0, BackendActivityBufferSize);
/* Initialize st_activity pointers. */
buffer = BackendActivityBuffer;
- for (i = 0; i < MaxBackends; i++)
+ for (i = 0; i < NumBackendStatSlots; i++)
{
- BackendStatusArray[i].st_activity = buffer;
+ BackendStatusArray[i].st_activity_raw = buffer;
buffer += pgstat_track_activity_query_size;
}
}
#ifdef USE_SSL
/* Create or attach to the shared SSL status buffer */
- size = mul_size(sizeof(PgBackendSSLStatus), MaxBackends);
+ size = mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots);
BackendSslStatusBuffer = (PgBackendSSLStatus *)
ShmemInitStruct("Backend SSL Status Buffer", size, &found);
/* Initialize st_sslstatus pointers. */
ptr = BackendSslStatusBuffer;
- for (i = 0; i < MaxBackends; i++)
+ for (i = 0; i < NumBackendStatSlots; i++)
{
BackendStatusArray[i].st_sslstatus = ptr;
ptr++;
}
}
#endif
+
+#ifdef ENABLE_GSS
+ /* Create or attach to the shared GSSAPI status buffer */
+ size = mul_size(sizeof(PgBackendGSSStatus), NumBackendStatSlots);
+ BackendGssStatusBuffer = (PgBackendGSSStatus *)
+ ShmemInitStruct("Backend GSS Status Buffer", size, &found);
+
+ if (!found)
+ {
+ PgBackendGSSStatus *ptr;
+
+ MemSet(BackendGssStatusBuffer, 0, size);
+
+ /* Initialize st_gssstatus pointers. */
+ ptr = BackendGssStatusBuffer;
+ for (i = 0; i < NumBackendStatSlots; i++)
+ {
+ BackendStatusArray[i].st_gssstatus = ptr;
+ ptr++;
+ }
+ }
+#endif
}
* pgstat_initialize() -
*
* Initialize pgstats state, and set up our on-proc-exit hook.
- * Called from InitPostgres. MyBackendId must be set,
+ * Called from InitPostgres and AuxiliaryProcessMain. For auxiliary process,
+ * MyBackendId is invalid. Otherwise, MyBackendId must be set,
* but we must not have started any transaction yet (since the
* exit hook must run after the last transaction exit).
* NOTE: MyDatabaseId isn't set yet; so the shutdown hook has to be careful.
pgstat_initialize(void)
{
/* Initialize MyBEEntry */
- Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
- MyBEEntry = &BackendStatusArray[MyBackendId - 1];
+ if (MyBackendId != InvalidBackendId)
+ {
+ Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
+ MyBEEntry = &BackendStatusArray[MyBackendId - 1];
+ }
+ else
+ {
+ /* Must be an auxiliary process */
+ Assert(MyAuxProcType != NotAnAuxProcess);
+
+ /*
+ * Assign the MyBEEntry for an auxiliary process. Since it doesn't
+ * have a BackendId, the slot is statically allocated based on the
+ * auxiliary process type (MyAuxProcType). Backends use slots indexed
+ * in the range from 1 to MaxBackends (inclusive), so we use
+ * MaxBackends + AuxBackendType + 1 as the index of the slot for an
+ * auxiliary process.
+ */
+ MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
+ }
/* Set up a process-exit hook to clean up */
on_shmem_exit(pgstat_beshutdown_hook, 0);
*
* Initialize this backend's entry in the PgBackendStatus array.
* Called from InitPostgres.
- * MyDatabaseId, session userid, and application_name must be set
- * (hence, this cannot be combined with pgstat_initialize).
+ *
+ * Apart from auxiliary processes, MyBackendId, MyDatabaseId,
+ * session userid, and application_name must be set for a
+ * backend (hence, this cannot be combined with pgstat_initialize).
* ----------
*/
void
pgstat_bestart(void)
{
- TimestampTz proc_start_timestamp;
- Oid userid;
SockAddr clientaddr;
volatile PgBackendStatus *beentry;
/*
* To minimize the time spent modifying the PgBackendStatus entry, fetch
* all the needed data first.
- *
- * If we have a MyProcPort, use its session start time (for consistency,
- * and to save a kernel call).
*/
- if (MyProcPort)
- proc_start_timestamp = MyProcPort->SessionStartTime;
- else
- proc_start_timestamp = GetCurrentTimestamp();
- userid = GetSessionUserId();
/*
* We may not have a MyProcPort (eg, if this is the autovacuum process).
* cute.
*/
beentry = MyBEEntry;
+
+ /* pgstats state must be initialized from pgstat_initialize() */
+ Assert(beentry != NULL);
+
+ if (MyBackendId != InvalidBackendId)
+ {
+ if (IsAutoVacuumLauncherProcess())
+ {
+ /* Autovacuum Launcher */
+ beentry->st_backendType = B_AUTOVAC_LAUNCHER;
+ }
+ else if (IsAutoVacuumWorkerProcess())
+ {
+ /* Autovacuum Worker */
+ beentry->st_backendType = B_AUTOVAC_WORKER;
+ }
+ else if (am_walsender)
+ {
+ /* Wal sender */
+ beentry->st_backendType = B_WAL_SENDER;
+ }
+ else if (IsBackgroundWorker)
+ {
+ /* bgworker */
+ beentry->st_backendType = B_BG_WORKER;
+ }
+ else
+ {
+ /* client-backend */
+ beentry->st_backendType = B_BACKEND;
+ }
+ }
+ else
+ {
+ /* Must be an auxiliary process */
+ Assert(MyAuxProcType != NotAnAuxProcess);
+ switch (MyAuxProcType)
+ {
+ case StartupProcess:
+ beentry->st_backendType = B_STARTUP;
+ break;
+ case BgWriterProcess:
+ beentry->st_backendType = B_BG_WRITER;
+ break;
+ case CheckpointerProcess:
+ beentry->st_backendType = B_CHECKPOINTER;
+ break;
+ case WalWriterProcess:
+ beentry->st_backendType = B_WAL_WRITER;
+ break;
+ case WalReceiverProcess:
+ beentry->st_backendType = B_WAL_RECEIVER;
+ break;
+ default:
+ elog(FATAL, "unrecognized process type: %d",
+ (int) MyAuxProcType);
+ proc_exit(1);
+ }
+ }
+
do
{
pgstat_increment_changecount_before(beentry);
} while ((beentry->st_changecount & 1) == 0);
beentry->st_procpid = MyProcPid;
- beentry->st_proc_start_timestamp = proc_start_timestamp;
+ beentry->st_proc_start_timestamp = MyStartTimestamp;
beentry->st_activity_start_timestamp = 0;
beentry->st_state_start_timestamp = 0;
beentry->st_xact_start_timestamp = 0;
beentry->st_databaseid = MyDatabaseId;
- beentry->st_userid = userid;
+
+ /* We have userid for client-backends, wal-sender and bgworker processes */
+ if (beentry->st_backendType == B_BACKEND
+ || beentry->st_backendType == B_WAL_SENDER
+ || beentry->st_backendType == B_BG_WORKER)
+ beentry->st_userid = GetSessionUserId();
+ else
+ beentry->st_userid = InvalidOid;
+
beentry->st_clientaddr = clientaddr;
if (MyProcPort && MyProcPort->remote_hostname)
strlcpy(beentry->st_clienthostname, MyProcPort->remote_hostname,
beentry->st_ssl = true;
beentry->st_sslstatus->ssl_bits = be_tls_get_cipher_bits(MyProcPort);
beentry->st_sslstatus->ssl_compression = be_tls_get_compression(MyProcPort);
- be_tls_get_version(MyProcPort, beentry->st_sslstatus->ssl_version, NAMEDATALEN);
- be_tls_get_cipher(MyProcPort, beentry->st_sslstatus->ssl_cipher, NAMEDATALEN);
- be_tls_get_peerdn_name(MyProcPort, beentry->st_sslstatus->ssl_clientdn, NAMEDATALEN);
+ strlcpy(beentry->st_sslstatus->ssl_version, be_tls_get_version(MyProcPort), NAMEDATALEN);
+ strlcpy(beentry->st_sslstatus->ssl_cipher, be_tls_get_cipher(MyProcPort), NAMEDATALEN);
+ be_tls_get_peer_subject_name(MyProcPort, beentry->st_sslstatus->ssl_client_dn, NAMEDATALEN);
+ be_tls_get_peer_serial(MyProcPort, beentry->st_sslstatus->ssl_client_serial, NAMEDATALEN);
+ be_tls_get_peer_issuer_name(MyProcPort, beentry->st_sslstatus->ssl_issuer_dn, NAMEDATALEN);
}
else
{
#else
beentry->st_ssl = false;
#endif
+
+#ifdef ENABLE_GSS
+ if (MyProcPort && MyProcPort->gss != NULL)
+ {
+ beentry->st_gss = true;
+ beentry->st_gssstatus->gss_auth = be_gssapi_get_auth(MyProcPort);
+ beentry->st_gssstatus->gss_enc = be_gssapi_get_enc(MyProcPort);
+
+ if (beentry->st_gssstatus->gss_auth)
+ strlcpy(beentry->st_gssstatus->gss_princ, be_gssapi_get_princ(MyProcPort), NAMEDATALEN);
+ }
+ else
+ {
+ beentry->st_gss = false;
+ }
+#else
+ beentry->st_gss = false;
+#endif
beentry->st_state = STATE_UNDEFINED;
beentry->st_appname[0] = '\0';
- beentry->st_activity[0] = '\0';
+ beentry->st_activity_raw[0] = '\0';
/* Also make sure the last byte in each string area is always 0 */
beentry->st_clienthostname[NAMEDATALEN - 1] = '\0';
beentry->st_appname[NAMEDATALEN - 1] = '\0';
- beentry->st_activity[pgstat_track_activity_query_size - 1] = '\0';
+ beentry->st_activity_raw[pgstat_track_activity_query_size - 1] = '\0';
beentry->st_progress_command = PROGRESS_COMMAND_INVALID;
beentry->st_progress_command_target = InvalidOid;
+
/*
* we don't zero st_progress_param here to save cycles; nobody should
* examine it until st_progress_command has been set to something other
pgstat_increment_changecount_before(beentry);
beentry->st_state = STATE_DISABLED;
beentry->st_state_start_timestamp = 0;
- beentry->st_activity[0] = '\0';
+ beentry->st_activity_raw[0] = '\0';
beentry->st_activity_start_timestamp = 0;
/* st_xact_start_timestamp and wait_event_info are also disabled */
beentry->st_xact_start_timestamp = 0;
start_timestamp = GetCurrentStatementStartTimestamp();
if (cmd_str != NULL)
{
- len = pg_mbcliplen(cmd_str, strlen(cmd_str),
- pgstat_track_activity_query_size - 1);
+ /*
+ * Compute the length of the to-be-stored string unaware of multi-byte
+ * characters. For speed reasons it will be corrected on read rather
+ * than recomputed on every write.
+ */
+ len = Min(strlen(cmd_str), pgstat_track_activity_query_size - 1);
}
current_timestamp = GetCurrentTimestamp();
if (cmd_str != NULL)
{
- memcpy((char *) beentry->st_activity, cmd_str, len);
- beentry->st_activity[len] = '\0';
+ memcpy((char *) beentry->st_activity_raw, cmd_str, len);
+ beentry->st_activity_raw[len] = '\0';
beentry->st_activity_start_timestamp = start_timestamp;
}
const int64 *val)
{
volatile PgBackendStatus *beentry = MyBEEntry;
- int i;
+ int i;
if (!beentry || !pgstat_track_activities || nparam == 0)
return;
LocalPgBackendStatus *localtable;
LocalPgBackendStatus *localentry;
char *localappname,
+ *localclienthostname,
*localactivity;
#ifdef USE_SSL
PgBackendSSLStatus *localsslstatus;
localtable = (LocalPgBackendStatus *)
MemoryContextAlloc(pgStatLocalContext,
- sizeof(LocalPgBackendStatus) * MaxBackends);
+ sizeof(LocalPgBackendStatus) * NumBackendStatSlots);
localappname = (char *)
MemoryContextAlloc(pgStatLocalContext,
- NAMEDATALEN * MaxBackends);
+ NAMEDATALEN * NumBackendStatSlots);
+ localclienthostname = (char *)
+ MemoryContextAlloc(pgStatLocalContext,
+ NAMEDATALEN * NumBackendStatSlots);
localactivity = (char *)
MemoryContextAlloc(pgStatLocalContext,
- pgstat_track_activity_query_size * MaxBackends);
+ pgstat_track_activity_query_size * NumBackendStatSlots);
#ifdef USE_SSL
localsslstatus = (PgBackendSSLStatus *)
MemoryContextAlloc(pgStatLocalContext,
- sizeof(PgBackendSSLStatus) * MaxBackends);
+ sizeof(PgBackendSSLStatus) * NumBackendStatSlots);
#endif
localNumBackends = 0;
beentry = BackendStatusArray;
localentry = localtable;
- for (i = 1; i <= MaxBackends; i++)
+ for (i = 1; i <= NumBackendStatSlots; i++)
{
/*
* Follow the protocol of retrying if st_changecount changes while we
localentry->backendStatus.st_procpid = beentry->st_procpid;
if (localentry->backendStatus.st_procpid > 0)
{
- memcpy(&localentry->backendStatus, (char *) beentry, sizeof(PgBackendStatus));
+ memcpy(&localentry->backendStatus, unvolatize(PgBackendStatus *, beentry), sizeof(PgBackendStatus));
/*
* strcpy is safe even if the string is modified concurrently,
*/
strcpy(localappname, (char *) beentry->st_appname);
localentry->backendStatus.st_appname = localappname;
- strcpy(localactivity, (char *) beentry->st_activity);
- localentry->backendStatus.st_activity = localactivity;
+ strcpy(localclienthostname, (char *) beentry->st_clienthostname);
+ localentry->backendStatus.st_clienthostname = localclienthostname;
+ strcpy(localactivity, (char *) beentry->st_activity_raw);
+ localentry->backendStatus.st_activity_raw = localactivity;
localentry->backendStatus.st_ssl = beentry->st_ssl;
#ifdef USE_SSL
if (beentry->st_ssl)
localentry++;
localappname += NAMEDATALEN;
+ localclienthostname += NAMEDATALEN;
localactivity += pgstat_track_activity_query_size;
#ifdef USE_SSL
localsslstatus++;
const char *
pgstat_get_wait_event_type(uint32 wait_event_info)
{
- uint8 classId;
+ uint32 classId;
const char *event_type;
/* report process as not waiting. */
if (wait_event_info == 0)
return NULL;
- wait_event_info = wait_event_info >> 24;
- classId = wait_event_info & 0XFF;
+ classId = wait_event_info & 0xFF000000;
switch (classId)
{
- case WAIT_LWLOCK_NAMED:
- event_type = "LWLockNamed";
- break;
- case WAIT_LWLOCK_TRANCHE:
- event_type = "LWLockTranche";
+ case PG_WAIT_LWLOCK:
+ event_type = "LWLock";
break;
- case WAIT_LOCK:
+ case PG_WAIT_LOCK:
event_type = "Lock";
break;
- case WAIT_BUFFER_PIN:
+ case PG_WAIT_BUFFER_PIN:
event_type = "BufferPin";
break;
+ case PG_WAIT_ACTIVITY:
+ event_type = "Activity";
+ break;
+ case PG_WAIT_CLIENT:
+ event_type = "Client";
+ break;
+ case PG_WAIT_EXTENSION:
+ event_type = "Extension";
+ break;
+ case PG_WAIT_IPC:
+ event_type = "IPC";
+ break;
+ case PG_WAIT_TIMEOUT:
+ event_type = "Timeout";
+ break;
+ case PG_WAIT_IO:
+ event_type = "IO";
+ break;
default:
event_type = "???";
break;
const char *
pgstat_get_wait_event(uint32 wait_event_info)
{
- uint8 classId;
+ uint32 classId;
uint16 eventId;
const char *event_name;
if (wait_event_info == 0)
return NULL;
- eventId = wait_event_info & ((1 << 24) - 1);
- wait_event_info = wait_event_info >> 24;
- classId = wait_event_info & 0XFF;
+ classId = wait_event_info & 0xFF000000;
+ eventId = wait_event_info & 0x0000FFFF;
switch (classId)
{
- case WAIT_LWLOCK_NAMED:
- case WAIT_LWLOCK_TRANCHE:
+ case PG_WAIT_LWLOCK:
event_name = GetLWLockIdentifier(classId, eventId);
break;
- case WAIT_LOCK:
+ case PG_WAIT_LOCK:
event_name = GetLockNameFromTagType(eventId);
break;
- case WAIT_BUFFER_PIN:
+ case PG_WAIT_BUFFER_PIN:
event_name = "BufferPin";
break;
+ case PG_WAIT_ACTIVITY:
+ {
+ WaitEventActivity w = (WaitEventActivity) wait_event_info;
+
+ event_name = pgstat_get_wait_activity(w);
+ break;
+ }
+ case PG_WAIT_CLIENT:
+ {
+ WaitEventClient w = (WaitEventClient) wait_event_info;
+
+ event_name = pgstat_get_wait_client(w);
+ break;
+ }
+ case PG_WAIT_EXTENSION:
+ event_name = "Extension";
+ break;
+ case PG_WAIT_IPC:
+ {
+ WaitEventIPC w = (WaitEventIPC) wait_event_info;
+
+ event_name = pgstat_get_wait_ipc(w);
+ break;
+ }
+ case PG_WAIT_TIMEOUT:
+ {
+ WaitEventTimeout w = (WaitEventTimeout) wait_event_info;
+
+ event_name = pgstat_get_wait_timeout(w);
+ break;
+ }
+ case PG_WAIT_IO:
+ {
+ WaitEventIO w = (WaitEventIO) wait_event_info;
+
+ event_name = pgstat_get_wait_io(w);
+ break;
+ }
default:
event_name = "unknown wait event";
break;
}
/* ----------
- * pgstat_get_backend_current_activity() -
- *
- * Return a string representing the current activity of the backend with
- * the specified PID. This looks directly at the BackendStatusArray,
- * and so will provide current information regardless of the age of our
- * transaction's snapshot of the status array.
- *
- * It is the caller's responsibility to invoke this only for backends whose
- * state is expected to remain stable while the result is in use. The
- * only current use is in deadlock reporting, where we can expect that
- * the target backend is blocked on a lock. (There are corner cases
- * where the target's wait could get aborted while we are looking at it,
- * but the very worst consequence is to return a pointer to a string
- * that's been changed, so we won't worry too much.)
+ * pgstat_get_wait_activity() -
*
- * Note: return strings for special cases match pg_stat_get_backend_activity.
+ * Convert WaitEventActivity to string.
* ----------
*/
-const char *
-pgstat_get_backend_current_activity(int pid, bool checkUser)
+static const char *
+pgstat_get_wait_activity(WaitEventActivity w)
{
- PgBackendStatus *beentry;
- int i;
+ /* Fallback, returned only if no enum case below matches */
+ const char *event_name = "unknown wait event";
- beentry = BackendStatusArray;
- for (i = 1; i <= MaxBackends; i++)
+ switch (w)
{
- /*
- * Although we expect the target backend's entry to be stable, that
- * doesn't imply that anyone else's is. To avoid identifying the
- * wrong backend, while we check for a match to the desired PID we
- * must follow the protocol of retrying if st_changecount changes
- * while we examine the entry, or if it's odd. (This might be
- * unnecessary, since fetching or storing an int is almost certainly
- * atomic, but let's play it safe.) We use a volatile pointer here to
- * ensure the compiler doesn't try to get cute.
- */
- volatile PgBackendStatus *vbeentry = beentry;
- bool found;
-
- for (;;)
- {
- int before_changecount;
- int after_changecount;
-
- pgstat_save_changecount_before(vbeentry, before_changecount);
-
+ case WAIT_EVENT_ARCHIVER_MAIN:
+ event_name = "ArchiverMain";
+ break;
+ case WAIT_EVENT_AUTOVACUUM_MAIN:
+ event_name = "AutoVacuumMain";
+ break;
+ case WAIT_EVENT_BGWRITER_HIBERNATE:
+ event_name = "BgWriterHibernate";
+ break;
+ case WAIT_EVENT_BGWRITER_MAIN:
+ event_name = "BgWriterMain";
+ break;
+ case WAIT_EVENT_CHECKPOINTER_MAIN:
+ event_name = "CheckpointerMain";
+ break;
+ case WAIT_EVENT_LOGICAL_APPLY_MAIN:
+ event_name = "LogicalApplyMain";
+ break;
+ case WAIT_EVENT_LOGICAL_LAUNCHER_MAIN:
+ event_name = "LogicalLauncherMain";
+ break;
+ case WAIT_EVENT_PGSTAT_MAIN:
+ event_name = "PgStatMain";
+ break;
+ case WAIT_EVENT_RECOVERY_WAL_ALL:
+ event_name = "RecoveryWalAll";
+ break;
+ case WAIT_EVENT_RECOVERY_WAL_STREAM:
+ event_name = "RecoveryWalStream";
+ break;
+ case WAIT_EVENT_SYSLOGGER_MAIN:
+ event_name = "SysLoggerMain";
+ break;
+ case WAIT_EVENT_WAL_RECEIVER_MAIN:
+ event_name = "WalReceiverMain";
+ break;
+ case WAIT_EVENT_WAL_SENDER_MAIN:
+ event_name = "WalSenderMain";
+ break;
+ case WAIT_EVENT_WAL_WRITER_MAIN:
+ event_name = "WalWriterMain";
+ break;
+ /* no default case, so that compiler will warn */
+ }
+
+ return event_name;
+}
+
+/* ----------
+ * pgstat_get_wait_client() -
+ *
+ * Convert WaitEventClient to string.
+ * ----------
+ */
+static const char *
+pgstat_get_wait_client(WaitEventClient w)
+{
+ /* Fallback, returned only if no enum case below matches */
+ const char *event_name = "unknown wait event";
+
+ switch (w)
+ {
+ case WAIT_EVENT_CLIENT_READ:
+ event_name = "ClientRead";
+ break;
+ case WAIT_EVENT_CLIENT_WRITE:
+ event_name = "ClientWrite";
+ break;
+ case WAIT_EVENT_LIBPQWALRECEIVER_CONNECT:
+ event_name = "LibPQWalReceiverConnect";
+ break;
+ case WAIT_EVENT_LIBPQWALRECEIVER_RECEIVE:
+ event_name = "LibPQWalReceiverReceive";
+ break;
+ case WAIT_EVENT_SSL_OPEN_SERVER:
+ event_name = "SSLOpenServer";
+ break;
+ case WAIT_EVENT_WAL_RECEIVER_WAIT_START:
+ event_name = "WalReceiverWaitStart";
+ break;
+ case WAIT_EVENT_WAL_SENDER_WAIT_WAL:
+ event_name = "WalSenderWaitForWAL";
+ break;
+ case WAIT_EVENT_WAL_SENDER_WRITE_DATA:
+ event_name = "WalSenderWriteData";
+ break;
+ case WAIT_EVENT_GSS_OPEN_SERVER:
+ event_name = "GSSOpenServer";
+ break;
+ /* no default case, so that compiler will warn */
+ }
+
+ return event_name;
+}
+
+/* ----------
+ * pgstat_get_wait_ipc() -
+ *
+ * Convert WaitEventIPC to string.
+ * ----------
+ */
+static const char *
+pgstat_get_wait_ipc(WaitEventIPC w)
+{
+ /* Fallback, returned only if no enum case below matches */
+ const char *event_name = "unknown wait event";
+
+ switch (w)
+ {
+ case WAIT_EVENT_BGWORKER_SHUTDOWN:
+ event_name = "BgWorkerShutdown";
+ break;
+ case WAIT_EVENT_BGWORKER_STARTUP:
+ event_name = "BgWorkerStartup";
+ break;
+ case WAIT_EVENT_BTREE_PAGE:
+ event_name = "BtreePage";
+ break;
+ case WAIT_EVENT_CHECKPOINT_DONE:
+ event_name = "CheckpointDone";
+ break;
+ case WAIT_EVENT_CHECKPOINT_START:
+ event_name = "CheckpointStart";
+ break;
+ case WAIT_EVENT_CLOG_GROUP_UPDATE:
+ event_name = "ClogGroupUpdate";
+ break;
+ case WAIT_EVENT_EXECUTE_GATHER:
+ event_name = "ExecuteGather";
+ break;
+ case WAIT_EVENT_HASH_BATCH_ALLOCATING:
+ event_name = "Hash/Batch/Allocating";
+ break;
+ case WAIT_EVENT_HASH_BATCH_ELECTING:
+ event_name = "Hash/Batch/Electing";
+ break;
+ case WAIT_EVENT_HASH_BATCH_LOADING:
+ event_name = "Hash/Batch/Loading";
+ break;
+ case WAIT_EVENT_HASH_BUILD_ALLOCATING:
+ event_name = "Hash/Build/Allocating";
+ break;
+ case WAIT_EVENT_HASH_BUILD_ELECTING:
+ event_name = "Hash/Build/Electing";
+ break;
+ case WAIT_EVENT_HASH_BUILD_HASHING_INNER:
+ event_name = "Hash/Build/HashingInner";
+ break;
+ case WAIT_EVENT_HASH_BUILD_HASHING_OUTER:
+ event_name = "Hash/Build/HashingOuter";
+ break;
+ case WAIT_EVENT_HASH_GROW_BATCHES_ALLOCATING:
+ event_name = "Hash/GrowBatches/Allocating";
+ break;
+ case WAIT_EVENT_HASH_GROW_BATCHES_DECIDING:
+ event_name = "Hash/GrowBatches/Deciding";
+ break;
+ case WAIT_EVENT_HASH_GROW_BATCHES_ELECTING:
+ event_name = "Hash/GrowBatches/Electing";
+ break;
+ case WAIT_EVENT_HASH_GROW_BATCHES_FINISHING:
+ event_name = "Hash/GrowBatches/Finishing";
+ break;
+ case WAIT_EVENT_HASH_GROW_BATCHES_REPARTITIONING:
+ event_name = "Hash/GrowBatches/Repartitioning";
+ break;
+ case WAIT_EVENT_HASH_GROW_BUCKETS_ALLOCATING:
+ event_name = "Hash/GrowBuckets/Allocating";
+ break;
+ case WAIT_EVENT_HASH_GROW_BUCKETS_ELECTING:
+ event_name = "Hash/GrowBuckets/Electing";
+ break;
+ case WAIT_EVENT_HASH_GROW_BUCKETS_REINSERTING:
+ event_name = "Hash/GrowBuckets/Reinserting";
+ break;
+ case WAIT_EVENT_LOGICAL_SYNC_DATA:
+ event_name = "LogicalSyncData";
+ break;
+ case WAIT_EVENT_LOGICAL_SYNC_STATE_CHANGE:
+ event_name = "LogicalSyncStateChange";
+ break;
+ case WAIT_EVENT_MQ_INTERNAL:
+ event_name = "MessageQueueInternal";
+ break;
+ case WAIT_EVENT_MQ_PUT_MESSAGE:
+ event_name = "MessageQueuePutMessage";
+ break;
+ case WAIT_EVENT_MQ_RECEIVE:
+ event_name = "MessageQueueReceive";
+ break;
+ case WAIT_EVENT_MQ_SEND:
+ event_name = "MessageQueueSend";
+ break;
+ case WAIT_EVENT_PARALLEL_BITMAP_SCAN:
+ event_name = "ParallelBitmapScan";
+ break;
+ case WAIT_EVENT_PARALLEL_CREATE_INDEX_SCAN:
+ event_name = "ParallelCreateIndexScan";
+ break;
+ case WAIT_EVENT_PARALLEL_FINISH:
+ event_name = "ParallelFinish";
+ break;
+ case WAIT_EVENT_PROCARRAY_GROUP_UPDATE:
+ event_name = "ProcArrayGroupUpdate";
+ break;
+ case WAIT_EVENT_PROMOTE:
+ event_name = "Promote";
+ break;
+ case WAIT_EVENT_REPLICATION_ORIGIN_DROP:
+ event_name = "ReplicationOriginDrop";
+ break;
+ case WAIT_EVENT_REPLICATION_SLOT_DROP:
+ event_name = "ReplicationSlotDrop";
+ break;
+ case WAIT_EVENT_SAFE_SNAPSHOT:
+ event_name = "SafeSnapshot";
+ break;
+ case WAIT_EVENT_SYNC_REP:
+ event_name = "SyncRep";
+ break;
+ /* no default case, so that compiler will warn */
+ }
+
+ return event_name;
+}
+
+/* ----------
+ * pgstat_get_wait_timeout() -
+ *
+ * Convert WaitEventTimeout to string.
+ * ----------
+ */
+static const char *
+pgstat_get_wait_timeout(WaitEventTimeout w)
+{
+ /* Fallback, returned only if no enum case below matches */
+ const char *event_name = "unknown wait event";
+
+ switch (w)
+ {
+ case WAIT_EVENT_BASE_BACKUP_THROTTLE:
+ event_name = "BaseBackupThrottle";
+ break;
+ case WAIT_EVENT_PG_SLEEP:
+ event_name = "PgSleep";
+ break;
+ case WAIT_EVENT_RECOVERY_APPLY_DELAY:
+ event_name = "RecoveryApplyDelay";
+ break;
+ /* no default case, so that compiler will warn */
+ }
+
+ return event_name;
+}
+
+/* ----------
+ * pgstat_get_wait_io() -
+ *
+ * Convert WaitEventIO to string.
+ * ----------
+ */
+static const char *
+pgstat_get_wait_io(WaitEventIO w)
+{
+ /* Fallback, returned only if no enum case below matches */
+ const char *event_name = "unknown wait event";
+
+ switch (w)
+ {
+ case WAIT_EVENT_BUFFILE_READ:
+ event_name = "BufFileRead";
+ break;
+ case WAIT_EVENT_BUFFILE_WRITE:
+ event_name = "BufFileWrite";
+ break;
+ case WAIT_EVENT_CONTROL_FILE_READ:
+ event_name = "ControlFileRead";
+ break;
+ case WAIT_EVENT_CONTROL_FILE_SYNC:
+ event_name = "ControlFileSync";
+ break;
+ case WAIT_EVENT_CONTROL_FILE_SYNC_UPDATE:
+ event_name = "ControlFileSyncUpdate";
+ break;
+ case WAIT_EVENT_CONTROL_FILE_WRITE:
+ event_name = "ControlFileWrite";
+ break;
+ case WAIT_EVENT_CONTROL_FILE_WRITE_UPDATE:
+ event_name = "ControlFileWriteUpdate";
+ break;
+ case WAIT_EVENT_COPY_FILE_READ:
+ event_name = "CopyFileRead";
+ break;
+ case WAIT_EVENT_COPY_FILE_WRITE:
+ event_name = "CopyFileWrite";
+ break;
+ case WAIT_EVENT_DATA_FILE_EXTEND:
+ event_name = "DataFileExtend";
+ break;
+ case WAIT_EVENT_DATA_FILE_FLUSH:
+ event_name = "DataFileFlush";
+ break;
+ case WAIT_EVENT_DATA_FILE_IMMEDIATE_SYNC:
+ event_name = "DataFileImmediateSync";
+ break;
+ case WAIT_EVENT_DATA_FILE_PREFETCH:
+ event_name = "DataFilePrefetch";
+ break;
+ case WAIT_EVENT_DATA_FILE_READ:
+ event_name = "DataFileRead";
+ break;
+ case WAIT_EVENT_DATA_FILE_SYNC:
+ event_name = "DataFileSync";
+ break;
+ case WAIT_EVENT_DATA_FILE_TRUNCATE:
+ event_name = "DataFileTruncate";
+ break;
+ case WAIT_EVENT_DATA_FILE_WRITE:
+ event_name = "DataFileWrite";
+ break;
+ case WAIT_EVENT_DSM_FILL_ZERO_WRITE:
+ event_name = "DSMFillZeroWrite";
+ break;
+ case WAIT_EVENT_LOCK_FILE_ADDTODATADIR_READ:
+ event_name = "LockFileAddToDataDirRead";
+ break;
+ case WAIT_EVENT_LOCK_FILE_ADDTODATADIR_SYNC:
+ event_name = "LockFileAddToDataDirSync";
+ break;
+ case WAIT_EVENT_LOCK_FILE_ADDTODATADIR_WRITE:
+ event_name = "LockFileAddToDataDirWrite";
+ break;
+ case WAIT_EVENT_LOCK_FILE_CREATE_READ:
+ event_name = "LockFileCreateRead";
+ break;
+ case WAIT_EVENT_LOCK_FILE_CREATE_SYNC:
+ event_name = "LockFileCreateSync";
+ break;
+ case WAIT_EVENT_LOCK_FILE_CREATE_WRITE:
+ event_name = "LockFileCreateWrite";
+ break;
+ case WAIT_EVENT_LOCK_FILE_RECHECKDATADIR_READ:
+ event_name = "LockFileReCheckDataDirRead";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC:
+ event_name = "LogicalRewriteCheckpointSync";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_MAPPING_SYNC:
+ event_name = "LogicalRewriteMappingSync";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE:
+ event_name = "LogicalRewriteMappingWrite";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_SYNC:
+ event_name = "LogicalRewriteSync";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_TRUNCATE:
+ event_name = "LogicalRewriteTruncate";
+ break;
+ case WAIT_EVENT_LOGICAL_REWRITE_WRITE:
+ event_name = "LogicalRewriteWrite";
+ break;
+ case WAIT_EVENT_RELATION_MAP_READ:
+ event_name = "RelationMapRead";
+ break;
+ case WAIT_EVENT_RELATION_MAP_SYNC:
+ event_name = "RelationMapSync";
+ break;
+ case WAIT_EVENT_RELATION_MAP_WRITE:
+ event_name = "RelationMapWrite";
+ break;
+ case WAIT_EVENT_REORDER_BUFFER_READ:
+ event_name = "ReorderBufferRead";
+ break;
+ case WAIT_EVENT_REORDER_BUFFER_WRITE:
+ event_name = "ReorderBufferWrite";
+ break;
+ case WAIT_EVENT_REORDER_LOGICAL_MAPPING_READ:
+ event_name = "ReorderLogicalMappingRead";
+ break;
+ case WAIT_EVENT_REPLICATION_SLOT_READ:
+ event_name = "ReplicationSlotRead";
+ break;
+ case WAIT_EVENT_REPLICATION_SLOT_RESTORE_SYNC:
+ event_name = "ReplicationSlotRestoreSync";
+ break;
+ case WAIT_EVENT_REPLICATION_SLOT_SYNC:
+ event_name = "ReplicationSlotSync";
+ break;
+ case WAIT_EVENT_REPLICATION_SLOT_WRITE:
+ event_name = "ReplicationSlotWrite";
+ break;
+ case WAIT_EVENT_SLRU_FLUSH_SYNC:
+ event_name = "SLRUFlushSync";
+ break;
+ case WAIT_EVENT_SLRU_READ:
+ event_name = "SLRURead";
+ break;
+ case WAIT_EVENT_SLRU_SYNC:
+ event_name = "SLRUSync";
+ break;
+ case WAIT_EVENT_SLRU_WRITE:
+ event_name = "SLRUWrite";
+ break;
+ case WAIT_EVENT_SNAPBUILD_READ:
+ event_name = "SnapbuildRead";
+ break;
+ case WAIT_EVENT_SNAPBUILD_SYNC:
+ event_name = "SnapbuildSync";
+ break;
+ case WAIT_EVENT_SNAPBUILD_WRITE:
+ event_name = "SnapbuildWrite";
+ break;
+ case WAIT_EVENT_TIMELINE_HISTORY_FILE_SYNC:
+ event_name = "TimelineHistoryFileSync";
+ break;
+ case WAIT_EVENT_TIMELINE_HISTORY_FILE_WRITE:
+ event_name = "TimelineHistoryFileWrite";
+ break;
+ case WAIT_EVENT_TIMELINE_HISTORY_READ:
+ event_name = "TimelineHistoryRead";
+ break;
+ case WAIT_EVENT_TIMELINE_HISTORY_SYNC:
+ event_name = "TimelineHistorySync";
+ break;
+ case WAIT_EVENT_TIMELINE_HISTORY_WRITE:
+ event_name = "TimelineHistoryWrite";
+ break;
+ case WAIT_EVENT_TWOPHASE_FILE_READ:
+ event_name = "TwophaseFileRead";
+ break;
+ case WAIT_EVENT_TWOPHASE_FILE_SYNC:
+ event_name = "TwophaseFileSync";
+ break;
+ case WAIT_EVENT_TWOPHASE_FILE_WRITE:
+ event_name = "TwophaseFileWrite";
+ break;
+ case WAIT_EVENT_WALSENDER_TIMELINE_HISTORY_READ:
+ event_name = "WALSenderTimelineHistoryRead";
+ break;
+ case WAIT_EVENT_WAL_BOOTSTRAP_SYNC:
+ event_name = "WALBootstrapSync";
+ break;
+ case WAIT_EVENT_WAL_BOOTSTRAP_WRITE:
+ event_name = "WALBootstrapWrite";
+ break;
+ case WAIT_EVENT_WAL_COPY_READ:
+ event_name = "WALCopyRead";
+ break;
+ case WAIT_EVENT_WAL_COPY_SYNC:
+ event_name = "WALCopySync";
+ break;
+ case WAIT_EVENT_WAL_COPY_WRITE:
+ event_name = "WALCopyWrite";
+ break;
+ case WAIT_EVENT_WAL_INIT_SYNC:
+ event_name = "WALInitSync";
+ break;
+ case WAIT_EVENT_WAL_INIT_WRITE:
+ event_name = "WALInitWrite";
+ break;
+ case WAIT_EVENT_WAL_READ:
+ event_name = "WALRead";
+ break;
+ case WAIT_EVENT_WAL_SYNC:
+ event_name = "WALSync";
+ break;
+ case WAIT_EVENT_WAL_SYNC_METHOD_ASSIGN:
+ event_name = "WALSyncMethodAssign";
+ break;
+ case WAIT_EVENT_WAL_WRITE:
+ event_name = "WALWrite";
+ break;
+
+ /* no default case, so that compiler will warn */
+ }
+
+ return event_name;
+}
+
+
+/* ----------
+ * pgstat_get_backend_current_activity() -
+ *
+ * Return a string representing the current activity of the backend with
+ * the specified PID. This looks directly at the BackendStatusArray,
+ * and so will provide current information regardless of the age of our
+ * transaction's snapshot of the status array.
+ *
+ * It is the caller's responsibility to invoke this only for backends whose
+ * state is expected to remain stable while the result is in use. The
+ * only current use is in deadlock reporting, where we can expect that
+ * the target backend is blocked on a lock. (There are corner cases
+ * where the target's wait could get aborted while we are looking at it,
+ * but the very worst consequence is to return a pointer to a string
+ * that's been changed, so we won't worry too much.)
+ *
+ * Note: return strings for special cases match pg_stat_get_backend_activity.
+ * ----------
+ */
+const char *
+pgstat_get_backend_current_activity(int pid, bool checkUser)
+{
+ PgBackendStatus *beentry;
+ int i;
+
+ beentry = BackendStatusArray;
+ for (i = 1; i <= MaxBackends; i++)
+ {
+ /*
+ * Although we expect the target backend's entry to be stable, that
+ * doesn't imply that anyone else's is. To avoid identifying the
+ * wrong backend, while we check for a match to the desired PID we
+ * must follow the protocol of retrying if st_changecount changes
+ * while we examine the entry, or if it's odd. (This might be
+ * unnecessary, since fetching or storing an int is almost certainly
+ * atomic, but let's play it safe.) We use a volatile pointer here to
+ * ensure the compiler doesn't try to get cute.
+ */
+ volatile PgBackendStatus *vbeentry = beentry;
+ bool found;
+
+ for (;;)
+ {
+ int before_changecount;
+ int after_changecount;
+
+ pgstat_save_changecount_before(vbeentry, before_changecount);
+
found = (vbeentry->st_procpid == pid);
pgstat_save_changecount_after(vbeentry, after_changecount);
/* Now it is safe to use the non-volatile pointer */
if (checkUser && !superuser() && beentry->st_userid != GetUserId())
return "<insufficient privilege>";
- else if (*(beentry->st_activity) == '\0')
+ else if (*(beentry->st_activity_raw) == '\0')
return "<command string not enabled>";
else
- return beentry->st_activity;
+ {
+ /* this'll leak a bit of memory, but that seems acceptable */
+ return pgstat_clip_activity(beentry->st_activity_raw);
+ }
}
beentry++;
if (beentry->st_procpid == pid)
{
/* Read pointer just once, so it can't change after validation */
- const char *activity = beentry->st_activity;
+ const char *activity = beentry->st_activity_raw;
const char *activity_last;
/*
/*
* Copy only ASCII-safe characters so we don't run into encoding
* problems when reporting the message; and be sure not to run off
- * the end of memory.
+ * the end of memory. As only ASCII characters are reported, it
+ * doesn't seem necessary to perform multibyte aware clipping.
*/
ascii_safe_strlcpy(buffer, activity,
Min(buflen, pgstat_track_activity_query_size));
return NULL;
}
+/* ----------
+ * pgstat_get_backend_desc() -
+ *
+ * Return a human-readable description string for a BackendType.
+ * ----------
+ */
+const char *
+pgstat_get_backend_desc(BackendType backendType)
+{
+ /* Fallback, returned only if no enum case below matches */
+ const char *backendDesc = "unknown process type";
+
+ switch (backendType)
+ {
+ case B_AUTOVAC_LAUNCHER:
+ backendDesc = "autovacuum launcher";
+ break;
+ case B_AUTOVAC_WORKER:
+ backendDesc = "autovacuum worker";
+ break;
+ case B_BACKEND:
+ backendDesc = "client backend";
+ break;
+ case B_BG_WORKER:
+ backendDesc = "background worker";
+ break;
+ case B_BG_WRITER:
+ backendDesc = "background writer";
+ break;
+ case B_CHECKPOINTER:
+ backendDesc = "checkpointer";
+ break;
+ case B_STARTUP:
+ backendDesc = "startup";
+ break;
+ case B_WAL_RECEIVER:
+ backendDesc = "walreceiver";
+ break;
+ case B_WAL_SENDER:
+ backendDesc = "walsender";
+ break;
+ case B_WAL_WRITER:
+ backendDesc = "walwriter";
+ break;
+ }
+
+ return backendDesc;
+}
/* ------------------------------------------------------------
* Local support functions follow
pqsignal(SIGPIPE, SIG_IGN);
pqsignal(SIGUSR1, SIG_IGN);
pqsignal(SIGUSR2, SIG_IGN);
+ /* Reset some signals that are accepted by postmaster but not here */
pqsignal(SIGCHLD, SIG_DFL);
- pqsignal(SIGTTIN, SIG_DFL);
- pqsignal(SIGTTOU, SIG_DFL);
- pqsignal(SIGCONT, SIG_DFL);
- pqsignal(SIGWINCH, SIG_DFL);
PG_SETMASK(&UnBlockSig);
/*
* Identify myself via ps
*/
- init_ps_display("stats collector process", "", "", "");
+ init_ps_display("stats collector", "", "", "");
/*
- * Read in an existing statistics stats file or initialize the stats to
- * zero.
+ * Read in existing stats files or initialize the stats to zero.
*/
pgStatRunningInCollector = true;
pgStatDBHash = pgstat_read_statsfiles(InvalidOid, true, true);
}
/*
- * Write the stats file if a new request has arrived that is not
- * satisfied by existing file.
+ * Write the stats file(s) if a new request has arrived that is
+ * not satisfied by existing file(s).
*/
if (pgstat_write_statsfile_needed())
pgstat_write_statsfiles(false, false);
case PGSTAT_MTYPE_RESETSHAREDCOUNTER:
pgstat_recv_resetsharedcounter(
- (PgStat_MsgResetsharedcounter *) &msg,
+ (PgStat_MsgResetsharedcounter *) &msg,
len);
break;
case PGSTAT_MTYPE_RESETSINGLECOUNTER:
pgstat_recv_resetsinglecounter(
- (PgStat_MsgResetsinglecounter *) &msg,
+ (PgStat_MsgResetsinglecounter *) &msg,
len);
break;
pgstat_recv_tempfile((PgStat_MsgTempFile *) &msg, len);
break;
+ case PGSTAT_MTYPE_CHECKSUMFAILURE:
+ pgstat_recv_checksum_failure((PgStat_MsgChecksumFailure *) &msg, len);
+ break;
+
default:
break;
}
/* Sleep until there's something to do */
#ifndef WIN32
wr = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
- pgStatSock,
- -1L);
+ WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE,
+ pgStatSock, -1L,
+ WAIT_EVENT_PGSTAT_MAIN);
#else
/*
* backend_read_statsfile.
*/
wr = WaitLatchOrSocket(MyLatch,
- WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
+ WL_LATCH_SET | WL_POSTMASTER_DEATH | WL_SOCKET_READABLE | WL_TIMEOUT,
pgStatSock,
- 2 * 1000L /* msec */ );
+ 2 * 1000L /* msec */ ,
+ WAIT_EVENT_PGSTAT_MAIN);
#endif
/*
dbentry->n_temp_files = 0;
dbentry->n_temp_bytes = 0;
dbentry->n_deadlocks = 0;
+ dbentry->n_checksum_failures = 0;
dbentry->n_block_read_time = 0;
dbentry->n_block_write_time = 0;
* pgstat_write_statsfiles() -
* Write the global statistics file, as well as requested DB files.
*
- * If writing to the permanent files (happens when the collector is
- * shutting down only), remove the temporary files so that backends
- * starting up under a new postmaster can't read the old data before
- * the new collector is ready.
+ * 'permanent' specifies writing to the permanent files not temporary ones.
+ * When true (happens only when the collector is shutting down), also remove
+ * the temporary files so that backends starting up under a new postmaster
+ * can't read old data before the new collector is ready.
*
* When 'allDbs' is false, only the requested databases (listed in
- * last_statrequests) will be written; otherwise, all databases will be
- * written.
+ * pending_write_requests) will be written; otherwise, all databases
+ * will be written.
* ----------
*/
static void
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Write out the tables and functions into the DB stat file, if
- * required.
- *
- * We need to do this before the dbentry write, to ensure the
- * timestamps written to both are consistent.
+ * Write out the table and function stats for this DB into the
+ * appropriate per-DB stat file, if required.
*/
if (allDbs || pgstat_db_requested(dbentry->databaseid))
{
+ /* Make DB's timestamp consistent with the global stats */
dbentry->stats_timestamp = globalStats.stats_timestamp;
+
pgstat_write_db_statsfile(dbentry, permanent);
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write temporary statistics file \"%s\": %m",
- tmpfile)));
+ errmsg("could not write temporary statistics file \"%s\": %m",
+ tmpfile)));
FreeFile(fpout);
unlink(tmpfile);
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not close temporary statistics file \"%s\": %m",
- tmpfile)));
+ errmsg("could not close temporary statistics file \"%s\": %m",
+ tmpfile)));
unlink(tmpfile);
}
else if (rename(tmpfile, statfile) < 0)
* Now throw away the list of requests. Note that requests sent after we
* started the write are still waiting on the network socket.
*/
- if (!slist_is_empty(&last_statrequests))
- {
- slist_mutable_iter iter;
-
- /*
- * Strictly speaking we should do slist_delete_current() before
- * freeing each request struct. We skip that and instead
- * re-initialize the list header at the end. Nonetheless, we must use
- * slist_foreach_modify, not just slist_foreach, since we will free
- * the node's storage before advancing.
- */
- slist_foreach_modify(iter, &last_statrequests)
- {
- DBWriteRequest *req;
-
- req = slist_container(DBWriteRequest, next, iter.cur);
- pfree(req);
- }
-
- slist_init(&last_statrequests);
- }
+ list_free(pending_write_requests);
+ pending_write_requests = NIL;
}
/*
pgstat_stat_directory,
databaseid,
tempname ? "tmp" : "stat");
- if (printed > len)
+ if (printed >= len)
elog(ERROR, "overlength pgstat path");
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write temporary statistics file \"%s\": %m",
- tmpfile)));
+ errmsg("could not write temporary statistics file \"%s\": %m",
+ tmpfile)));
FreeFile(fpout);
unlink(tmpfile);
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not close temporary statistics file \"%s\": %m",
- tmpfile)));
+ errmsg("could not close temporary statistics file \"%s\": %m",
+ tmpfile)));
unlink(tmpfile);
}
else if (rename(tmpfile, statfile) < 0)
/* ----------
* pgstat_read_statsfiles() -
*
- * Reads in the existing statistics collector files and initializes the
- * databases' hash table. If the permanent file name is requested (which
- * only happens in the stats collector itself), also remove the file after
- * reading; the in-memory status is now authoritative, and the permanent file
- * would be out of date in case somebody else reads it.
+ * Reads in some existing statistics collector files and returns the
+ * databases hash table that is the top level of the data.
+ *
+ * If 'onlydb' is not InvalidOid, it means we only want data for that DB
+ * plus the shared catalogs ("DB 0"). We'll still populate the DB hash
+ * table for all databases, but we don't bother even creating table/function
+ * hash tables for other databases.
+ *
+ * 'permanent' specifies reading from the permanent files not temporary ones.
+ * When true (happens only when the collector is starting up), remove the
+ * files after reading; the in-memory status is now authoritative, and the
+ * files would be out of date in case somebody else reads them.
*
- * If a deep read is requested, table/function stats are read also, otherwise
+ * If a 'deep' read is requested, table/function stats are read, otherwise
* the table/function hash tables remain empty.
* ----------
*/
{
ereport(pgStatRunningInCollector ? LOG : WARNING,
(errmsg("corrupted statistics file \"%s\"", statfile)));
+ memset(&globalStats, 0, sizeof(globalStats));
goto done;
}
+ /*
+ * In the collector, disregard the timestamp we read from the permanent
+ * stats file; we should be willing to write a temp stats file immediately
+ * upon the first request from any backend. This only matters if the old
+ * file's timestamp is less than PGSTAT_STAT_INTERVAL ago, but that's not
+ * an unusual scenario.
+ */
+ if (pgStatRunningInCollector)
+ globalStats.stats_timestamp = 0;
+
/*
* Read archiver stats struct
*/
{
ereport(pgStatRunningInCollector ? LOG : WARNING,
(errmsg("corrupted statistics file \"%s\"", statfile)));
+ memset(&archiverStats, 0, sizeof(archiverStats));
goto done;
}
* Add to the DB hash
*/
dbentry = (PgStat_StatDBEntry *) hash_search(dbhash,
- (void *) &dbbuf.databaseid,
+ (void *) &dbbuf.databaseid,
HASH_ENTER,
&found);
if (found)
dbentry->functions = NULL;
/*
- * Don't collect tables if not the requested DB (or the
- * shared-table info)
+ * In the collector, disregard the timestamp we read from the
+ * permanent stats file; we should be willing to write a temp
+ * stats file immediately upon the first request from any
+ * backend.
+ */
+ if (pgStatRunningInCollector)
+ dbentry->stats_timestamp = 0;
+
+ /*
+ * Don't create tables/functions hashtables for uninteresting
+ * databases.
*/
if (onlydb != InvalidOid)
{
dbentry->tables = hash_create("Per-database table",
PGSTAT_TAB_HASH_SIZE,
&hash_ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
hash_ctl.keysize = sizeof(Oid);
hash_ctl.entrysize = sizeof(PgStat_StatFuncEntry);
dbentry->functions = hash_create("Per-database function",
PGSTAT_FUNCTION_HASH_SIZE,
&hash_ctl,
- HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
+ HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
/*
* If requested, read the data from the database-specific
- * file. If there was onlydb specified (!= InvalidOid), we
- * would not get here because of a break above. So we don't
- * need to recheck.
+ * file. Otherwise we just leave the hashtables empty.
*/
if (deep)
pgstat_read_db_statsfile(dbentry->databaseid,
* pgstat_read_db_statsfile() -
*
* Reads in the existing statistics collector file for the given database,
- * and initializes the tables and functions hash tables.
+ * filling the passed-in tables and functions hash tables.
*
- * As pgstat_read_statsfiles, if the permanent file is requested, it is
+ * As in pgstat_read_statsfiles, if the permanent file is requested, it is
* removed after reading.
+ *
+ * Note: this code has the ability to skip storing per-table or per-function
+ * data, if NULL is passed for the corresponding hashtable. That's not used
+ * at the moment though.
* ----------
*/
static void
}
/*
- * Skip if table belongs to a not requested database.
+ * Skip if table data not wanted.
*/
if (tabhash == NULL)
break;
tabentry = (PgStat_StatTabEntry *) hash_search(tabhash,
- (void *) &tabbuf.tableid,
- HASH_ENTER, &found);
+ (void *) &tabbuf.tableid,
+ HASH_ENTER, &found);
if (found)
{
}
/*
- * Skip if function belongs to a not requested database.
+ * Skip if function data not wanted.
*/
if (funchash == NULL)
break;
funcentry = (PgStat_StatFuncEntry *) hash_search(funchash,
- (void *) &funcbuf.functionid,
- HASH_ENTER, &found);
+ (void *) &funcbuf.functionid,
+ HASH_ENTER, &found);
if (found)
{
elog(DEBUG2, "removing permanent stats file \"%s\"", statfile);
unlink(statfile);
}
-
- return;
}
/* ----------
* pgstat_read_db_statsfile_timestamp() -
*
* Attempt to determine the timestamp of the last db statfile write.
- * Returns TRUE if successful; the timestamp is stored in *ts.
+ * Returns true if successful; the timestamp is stored in *ts.
*
* This needs to be careful about handling databases for which no stats file
* exists, such as databases without a stat entry or those not yet written:
{
TimestampTz min_ts = 0;
TimestampTz ref_ts = 0;
+ Oid inquiry_db;
int count;
/* already read it? */
return;
Assert(!pgStatRunningInCollector);
+ /*
+ * In a normal backend, we check staleness of the data for our own DB, and
+ * so we send MyDatabaseId in inquiry messages. In the autovac launcher,
+ * check staleness of the shared-catalog data, and send InvalidOid in
+ * inquiry messages so as not to force writing unnecessary data.
+ */
+ if (IsAutoVacuumLauncherProcess())
+ inquiry_db = InvalidOid;
+ else
+ inquiry_db = MyDatabaseId;
+
/*
* Loop until fresh enough stats file is available or we ran out of time.
* The stats inquiry message is sent repeatedly in case collector drops
CHECK_FOR_INTERRUPTS();
- ok = pgstat_read_db_statsfile_timestamp(MyDatabaseId, false, &file_ts);
+ ok = pgstat_read_db_statsfile_timestamp(inquiry_db, false, &file_ts);
cur_ts = GetCurrentTimestamp();
/* Calculate min acceptable timestamp, if we didn't already */
pfree(mytime);
}
- pgstat_send_inquiry(cur_ts, min_ts, MyDatabaseId);
+ pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
break;
}
/* Not there or too old, so kick the collector and wait a bit */
if ((count % PGSTAT_INQ_LOOP_COUNT) == 0)
- pgstat_send_inquiry(cur_ts, min_ts, MyDatabaseId);
+ pgstat_send_inquiry(cur_ts, min_ts, inquiry_db);
pg_usleep(PGSTAT_RETRY_DELAY * 1000L);
}
/*
* Autovacuum launcher wants stats about all databases, but a shallow read
- * is sufficient.
+ * is sufficient. Regular backends want a deep read for just the tables
+ * they can see (MyDatabaseId + shared catalogs).
*/
if (IsAutoVacuumLauncherProcess())
pgStatDBHash = pgstat_read_statsfiles(InvalidOid, false, false);
if (!pgStatLocalContext)
pgStatLocalContext = AllocSetContextCreate(TopMemoryContext,
"Statistics snapshot",
- ALLOCSET_SMALL_MINSIZE,
- ALLOCSET_SMALL_INITSIZE,
- ALLOCSET_SMALL_MAXSIZE);
+ ALLOCSET_SMALL_SIZES);
}
static void
pgstat_recv_inquiry(PgStat_MsgInquiry *msg, int len)
{
- slist_iter iter;
- DBWriteRequest *newreq;
PgStat_StatDBEntry *dbentry;
elog(DEBUG2, "received inquiry for database %u", msg->databaseid);
/*
- * Find the last write request for this DB. If it's older than the
- * request's cutoff time, update it; otherwise there's nothing to do.
+ * If there's already a write request for this DB, there's nothing to do.
*
* Note that if a request is found, we return early and skip the below
* check for clock skew. This is okay, since the only way for a DB
* request to be present in the list is that we have been here since the
- * last write round.
+ * last write round. It seems sufficient to check for clock skew once per
+ * write round.
*/
- slist_foreach(iter, &last_statrequests)
- {
- DBWriteRequest *req = slist_container(DBWriteRequest, next, iter.cur);
-
- if (req->databaseid != msg->databaseid)
- continue;
-
- if (msg->cutoff_time > req->request_time)
- req->request_time = msg->cutoff_time;
+ if (list_member_oid(pending_write_requests, msg->databaseid))
return;
- }
-
- /*
- * There's no request for this DB yet, so create one.
- */
- newreq = palloc(sizeof(DBWriteRequest));
-
- newreq->databaseid = msg->databaseid;
- newreq->request_time = msg->clock_time;
- slist_push_head(&last_statrequests, &newreq->next);
/*
+ * Check to see if we last wrote this database at a time >= the requested
+ * cutoff time. If so, this is a stale request that was generated before
+ * we updated the DB file, and we don't need to do so again.
+ *
* If the requestor's local clock time is older than stats_timestamp, we
* should suspect a clock glitch, ie system time going backwards; though
* the more likely explanation is just delayed message receipt. It is
* to update the stats file for a long time.
*/
dbentry = pgstat_get_db_entry(msg->databaseid, false);
- if ((dbentry != NULL) && (msg->clock_time < dbentry->stats_timestamp))
+ if (dbentry == NULL)
+ {
+ /*
+ * We have no data for this DB. Enter a write request anyway so that
+ * the global stats will get updated. This is needed to prevent
+ * backend_read_statsfile from waiting for data that we cannot supply,
+ * in the case of a new DB that nobody has yet reported any stats for.
+ * See the behavior of pgstat_read_db_statsfile_timestamp.
+ */
+ }
+ else if (msg->clock_time < dbentry->stats_timestamp)
{
TimestampTz cur_ts = GetCurrentTimestamp();
writetime, mytime, dbentry->databaseid);
pfree(writetime);
pfree(mytime);
-
- newreq->request_time = cur_ts;
- dbentry->stats_timestamp = cur_ts - 1;
}
+ else
+ {
+ /*
+ * Nope, it's just an old request. Assuming msg's clock_time is
+ * >= its cutoff_time, it must be stale, so we can ignore it.
+ */
+ return;
+ }
+ }
+ else if (msg->cutoff_time <= dbentry->stats_timestamp)
+ {
+ /* Stale request, ignore it */
+ return;
}
+
+ /*
+ * We need to write this DB, so create a request.
+ */
+ pending_write_requests = lappend_oid(pending_write_requests,
+ msg->databaseid);
}
PgStat_TableEntry *tabmsg = &(msg->m_entry[i]);
tabentry = (PgStat_StatTabEntry *) hash_search(dbentry->tables,
- (void *) &(tabmsg->t_id),
+ (void *) &(tabmsg->t_id),
HASH_ENTER, &found);
if (!found)
tabentry->n_dead_tuples = msg->m_dead_tuples;
/*
- * We reset changes_since_analyze to zero, forgetting any changes that
- * occurred while the ANALYZE was in progress.
+ * If commanded, reset changes_since_analyze to zero. This forgets any
+ * changes that were committed while the ANALYZE was in progress, but we
+ * have no good way to estimate how many of those there were.
*/
- tabentry->changes_since_analyze = 0;
+ if (msg->m_resetcounter)
+ tabentry->changes_since_analyze = 0;
if (msg->m_autovacuum)
{
dbentry->n_deadlocks++;
}
+/* ----------
+ * pgstat_recv_checksum_failure() -
+ *
+ * Process a CHECKSUMFAILURE message.
+ *
+ * "len" is the message length; it is unused here but kept so the
+ * signature matches the other pgstat_recv_* handlers.
+ * ----------
+ */
+static void
+pgstat_recv_checksum_failure(PgStat_MsgChecksumFailure *msg, int len)
+{
+ PgStat_StatDBEntry *dbentry;
+
+ /*
+ * Second argument true appears to request on-the-fly creation of the
+ * DB entry, which is why no NULL check is needed here (contrast the
+ * "false" call in pgstat_recv_inquiry) — confirm against
+ * pgstat_get_db_entry.
+ */
+ dbentry = pgstat_get_db_entry(msg->m_databaseid, true);
+
+ dbentry->n_checksum_failures += msg->m_failurecount;
+}
+
/* ----------
* pgstat_recv_tempfile() -
*
for (i = 0; i < msg->m_nentries; i++, funcmsg++)
{
funcentry = (PgStat_StatFuncEntry *) hash_search(dbentry->functions,
- (void *) &(funcmsg->f_id),
+ (void *) &(funcmsg->f_id),
HASH_ENTER, &found);
if (!found)
/* ----------
* pgstat_write_statsfile_needed() -
*
- * Do we need to write out the files?
+ * Do we need to write out any stats files?
* ----------
*/
static bool
pgstat_write_statsfile_needed(void)
{
- if (!slist_is_empty(&last_statrequests))
+ if (pending_write_requests != NIL)
return true;
/* Everything was written recently */
static bool
pgstat_db_requested(Oid databaseid)
{
- slist_iter iter;
-
- /* Check the databases if they need to refresh the stats. */
- slist_foreach(iter, &last_statrequests)
- {
- DBWriteRequest *req = slist_container(DBWriteRequest, next, iter.cur);
+ /*
+ * If any requests are outstanding at all, we should write the stats for
+ * shared catalogs (the "database" with OID 0). This ensures that
+ * backends will see up-to-date stats for shared catalogs, even though
+ * they send inquiry messages mentioning only their own DB.
+ */
+ if (databaseid == InvalidOid && pending_write_requests != NIL)
+ return true;
- if (req->databaseid == databaseid)
- return true;
- }
+ /* Search to see if there's an open request to write this database. */
+ if (list_member_oid(pending_write_requests, databaseid))
+ return true;
+ /* No pending request mentions this database. */
return false;
}
+
+/*
+ * Convert a potentially unsafely truncated activity string (see
+ * PgBackendStatus.st_activity_raw's documentation) into a correctly truncated
+ * one.
+ *
+ * The returned string is allocated in the caller's memory context and may be
+ * freed.
+ */
+char *
+pgstat_clip_activity(const char *raw_activity)
+{
+ char *activity;
+ int rawlen;
+ int cliplen;
+
+ /*
+ * Some callers, like pgstat_get_backend_current_activity(), do not
+ * guarantee that the buffer isn't concurrently modified. We try to take
+ * care that the buffer is always terminated by a NUL byte regardless, but
+ * let's still be paranoid about the string's length. In those cases the
+ * underlying buffer is guaranteed to be pgstat_track_activity_query_size
+ * large.
+ */
+ activity = pnstrdup(raw_activity, pgstat_track_activity_query_size - 1);
+
+ /* now double-guaranteed to be NUL terminated */
+ rawlen = strlen(activity);
+
+ /*
+ * All supported server-encodings make it possible to determine the length
+ * of a multi-byte character from its first byte (this is not the case for
+ * client encodings, see GB18030). As st_activity is always stored using
+ * server encoding, this allows us to perform multi-byte aware truncation,
+ * even if the string earlier was truncated in the middle of a multi-byte
+ * character.
+ */
+ cliplen = pg_mbcliplen(activity, rawlen,
+ pgstat_track_activity_query_size - 1);
+
+ /*
+ * Presumably cliplen <= rawlen (pg_mbcliplen never extends past the
+ * given length), so this store stays within the pnstrdup'd buffer —
+ * confirm against pg_mbcliplen.
+ */
+ activity[cliplen] = '\0';
+
+ return activity;
+}