From 63be3b78f6e1d92c7f02d4f73a55dd5cefab923b Mon Sep 17 00:00:00 2001
From: Fujii Masao
Date: Sun, 2 Feb 2014 10:28:18 +0900
Subject: [PATCH] Fix typos in docs and comments.

Thom Brown
---
 doc/src/sgml/high-availability.sgml  |  2 +-
 doc/src/sgml/ref/pg_receivexlog.sgml |  2 +-
 src/backend/replication/slot.c       | 10 +++++-----
 src/backend/replication/walsender.c  |  4 ++--
 src/bin/pg_basebackup/receivelog.c   |  2 +-
 5 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml
index 9d43586fe2..a526f6d5b1 100644
--- a/doc/src/sgml/high-availability.sgml
+++ b/doc/src/sgml/high-availability.sgml
@@ -756,7 +756,7 @@ archive_cleanup_command = 'pg_archivecleanup /path/to/archive %r'
     has received them. If this occurs, the standby will need to be
     reinitialized from a new base backup. You can avoid this by setting
     wal_keep_segments to a value large enough to ensure that
-    WAL segments are not recycled too early, or by configuration a replication
+    WAL segments are not recycled too early, or by configuring a replication
     slot for the standby. If you set up a WAL archive that's accessible from
     the standby, these solutions are not required, since the standby can
     always use the archive to catch up provided it retains enough segments.
diff --git a/doc/src/sgml/ref/pg_receivexlog.sgml b/doc/src/sgml/ref/pg_receivexlog.sgml
index 2a44af46c5..f05e718094 100644
--- a/doc/src/sgml/ref/pg_receivexlog.sgml
+++ b/doc/src/sgml/ref/pg_receivexlog.sgml
@@ -235,7 +235,7 @@ PostgreSQL documentation
        When this option is used, pg_receivexlog will report
        a flush position to the server, indicating when each segment has been
        synchronized to disk so that the server can remove that segment if it
-       is not otherwise needed.  When using this paramter, it is important
+       is not otherwise needed.  When using this parameter, it is important
        to make sure that pg_receivexlog cannot become the
        synchronous standby through an incautious setting of
        <xref linkend="guc-synchronous-standby-names">; it does not flush
diff --git a/src/backend/replication/slot.c b/src/backend/replication/slot.c
index 30aff5f5e3..826c7f027e 100644
--- a/src/backend/replication/slot.c
+++ b/src/backend/replication/slot.c
@@ -15,7 +15,7 @@
  * Replication slots are used to keep state about replication streams
  * originating from this cluster. Their primary purpose is to prevent the
  * premature removal of WAL or of old tuple versions in a manner that would
- * interfere with replication; they also useful for monitoring purposes.
+ * interfere with replication; they are also useful for monitoring purposes.
  * Slots need to be permanent (to allow restarts), crash-safe, and allocatable
  * on standbys (to support cascading setups). The requirement that slots be
  * usable on standbys precludes storing them in the system catalogs.
@@ -142,7 +142,7 @@ ReplicationSlotsShmemInit(void)
  * Check whether the passed slot name is valid and report errors at elevel.
  *
  * Slot names may consist out of [a-z0-9_]{1,NAMEDATALEN-1} which should allow
- * the name to be uses as a directory name on every supported OS.
+ * the name to be used as a directory name on every supported OS.
  *
  * Returns whether the directory name is valid or not if elevel < ERROR.
  */
@@ -290,7 +290,7 @@ ReplicationSlotCreate(const char *name, bool db_specific)
 }
 
 /*
- * Find an previously created slot and mark it as used by this backend.
+ * Find a previously created slot and mark it as used by this backend.
  */
 void
 ReplicationSlotAcquire(const char *name)
@@ -743,7 +743,7 @@ CreateSlotOnDisk(ReplicationSlot *slot)
 
 	/*
 	 * No need to take out the io_in_progress_lock, nobody else can see this
-	 * slot yet, so nobody else wil write. We're reusing SaveSlotToPath which
+	 * slot yet, so nobody else will write. We're reusing SaveSlotToPath which
 	 * takes out the lock, if we'd take the lock here, we'd deadlock.
 	 */
 
@@ -780,7 +780,7 @@ CreateSlotOnDisk(ReplicationSlot *slot)
 					tmppath, path)));
 
 	/*
-	 * If we'd now fail - really unlikely - we wouldn't know wether this slot
+	 * If we'd now fail - really unlikely - we wouldn't know whether this slot
 	 * would persist after an OS crash or not - so, force a restart. The
 	 * restart would try to fysnc this again till it works.
 	 */
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index a661d88277..06b22e2aba 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -957,7 +957,7 @@ PhysicalConfirmReceivedLocation(XLogRecPtr lsn)
 	}
 
 	/*
-	 * One could argue that the slot should saved to disk now, but that'd be
+	 * One could argue that the slot should be saved to disk now, but that'd be
 	 * energy wasted - the worst lost information can do here is give us wrong
 	 * information in a statistics view - we'll just potentially be more
 	 * conservative in removing files.
@@ -1032,7 +1032,7 @@ PhysicalReplicationSlotNewXmin(TransactionId feedbackXmin)
 	SpinLockAcquire(&slot->mutex);
 	MyPgXact->xmin = InvalidTransactionId;
 	/*
-	 * For physical replication we don't need the the interlock provided
+	 * For physical replication we don't need the interlock provided
 	 * by xmin and effective_xmin since the consequences of a missed increase
 	 * are limited to query cancellations, so set both at once.
 	 */
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 7d3c76c994..ef73b4b166 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -535,7 +535,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
 	 * possibly re-request, and remove older WAL safely.
 	 *
 	 * We only report it when a slot has explicitly been used, because
-	 * reporting the flush position makes one elegible as a synchronous
+	 * reporting the flush position makes one eligible as a synchronous
 	 * replica. People shouldn't include generic names in
 	 * synchronous_standby_names, but we've protected them against it so
 	 * far, so let's continue to do so in the situations when possible.
-- 
2.40.0