From 181bdb90ba1f2361aec5a621c8f1e01736f8cad2 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas
Date: Mon, 6 Feb 2017 11:33:58 +0200
Subject: [PATCH] Fix typos in comments.

Backpatch to all supported versions, where applicable, to make
backpatching of future fixes go more smoothly.

Josh Soref

Discussion: https://www.postgresql.org/message-id/CACZqfqCf+5qRztLPgmmosr-B0Ye4srWzzw_mo4c_8_B_mtjmJQ@mail.gmail.com
---
 configure | 4 ++--
 configure.in | 4 ++--
 contrib/bloom/blvacuum.c | 2 +-
 contrib/cube/expected/cube.out | 4 ++--
 contrib/cube/expected/cube_2.out | 4 ++--
 contrib/cube/sql/cube.sql | 4 ++--
 contrib/earthdistance/earthdistance--1.1.sql | 2 +-
 contrib/isn/ISSN.h | 2 +-
 contrib/isn/isn.c | 4 ++--
 contrib/ltree/expected/ltree.out | 2 +-
 contrib/ltree/ltxtquery_io.c | 2 +-
 contrib/ltree/sql/ltree.sql | 2 +-
 contrib/pg_standby/pg_standby.c | 2 +-
 contrib/pg_stat_statements/pg_stat_statements.c | 2 +-
 contrib/pg_trgm/trgm_op.c | 6 +++---
 contrib/pgcrypto/mbuf.c | 2 +-
 contrib/pgcrypto/pgp-mpi-internal.c | 6 +++---
 contrib/pgcrypto/pgp-mpi-openssl.c | 6 +++---
 contrib/postgres_fdw/expected/postgres_fdw.out | 2 +-
 contrib/postgres_fdw/sql/postgres_fdw.sql | 2 +-
 contrib/seg/seg.c | 2 +-
 contrib/sepgsql/selinux.c | 2 +-
 contrib/sepgsql/sql/label.sql | 2 +-
 contrib/spi/refint.c | 2 +-
 contrib/start-scripts/osx/PostgreSQL | 2 +-
 contrib/tsearch2/tsearch2--1.0.sql | 2 +-
 contrib/xml2/xpath.c | 2 +-
 src/Makefile.shlib | 2 +-
 src/backend/access/gist/README | 2 +-
 src/backend/access/hash/hashpage.c | 2 +-
 src/backend/access/heap/rewriteheap.c | 2 +-
 src/backend/access/transam/commit_ts.c | 2 +-
 src/backend/access/transam/xact.c | 2 +-
 src/backend/catalog/objectaddress.c | 2 +-
 src/backend/commands/amcmds.c | 2 +-
 src/backend/commands/dbcommands.c | 6 +++---
 src/backend/commands/explain.c | 2 +-
 src/backend/commands/functioncmds.c | 4 ++--
 src/backend/commands/indexcmds.c | 4 ++--
 src/backend/commands/publicationcmds.c | 2 +-
 src/backend/commands/subscriptioncmds.c | 2 +-
 src/backend/commands/tablecmds.c | 2 +-
 src/backend/executor/execMain.c | 2 +-
 src/backend/executor/execParallel.c | 2 +-
 src/backend/executor/execReplication.c | 2 +-
 src/backend/executor/nodeAgg.c | 2 +-
 src/backend/executor/nodeWindowAgg.c | 2 +-
 src/backend/libpq/hba.c | 2 +-
 src/backend/optimizer/geqo/geqo_erx.c | 4 ++--
 src/backend/optimizer/path/joinpath.c | 2 +-
 src/backend/optimizer/plan/planmain.c | 2 +-
 src/backend/optimizer/util/joininfo.c | 2 +-
 src/backend/optimizer/util/restrictinfo.c | 2 +-
 src/backend/parser/gram.y | 2 +-
 src/backend/parser/parse_utilcmd.c | 2 +-
 src/backend/postmaster/bgwriter.c | 2 +-
 src/backend/postmaster/postmaster.c | 2 +-
 src/backend/replication/logical/launcher.c | 8 ++++----
 src/backend/replication/logical/origin.c | 4 ++--
 src/backend/replication/logical/proto.c | 2 +-
 src/backend/replication/logical/reorderbuffer.c | 6 +++---
 src/backend/replication/logical/snapbuild.c | 6 +++---
 src/backend/replication/logical/worker.c | 2 +-
 src/backend/replication/pgoutput/pgoutput.c | 8 ++++----
 src/backend/storage/ipc/latch.c | 2 +-
 src/backend/storage/ipc/shm_mq.c | 2 +-
 src/backend/storage/ipc/standby.c | 2 +-
 src/backend/storage/lmgr/lock.c | 2 +-
 src/backend/storage/lmgr/lwlock.c | 6 +++---
 src/backend/storage/lmgr/predicate.c | 2 +-
 src/backend/storage/smgr/md.c | 2 +-
 src/backend/tsearch/spell.c | 2 +-
 src/backend/tsearch/ts_parse.c | 6 +++---
 src/backend/tsearch/wparser_def.c | 2 +-
 src/backend/utils/adt/formatting.c | 2 +-
 src/backend/utils/adt/rangetypes_selfuncs.c | 2 +-
src/backend/utils/adt/ruleutils.c | 2 +- src/backend/utils/adt/tsrank.c | 4 ++-- src/backend/utils/adt/windowfuncs.c | 2 +- src/backend/utils/cache/relcache.c | 2 +- src/backend/utils/fmgr/funcapi.c | 2 +- src/backend/utils/init/postinit.c | 2 +- src/backend/utils/misc/Makefile | 2 +- src/backend/utils/mmgr/freepage.c | 6 +++--- src/backend/utils/time/tqual.c | 2 +- src/bin/pg_dump/pg_backup_custom.c | 2 +- src/bin/pg_dump/pg_dump.c | 2 +- src/bin/psql/common.c | 2 +- src/bin/psql/describe.c | 6 +++--- src/include/access/visibilitymap.h | 2 +- src/include/access/xact.h | 2 +- src/include/c.h | 2 +- src/include/catalog/partition.h | 2 +- src/include/catalog/pg_subscription.h | 6 +++--- src/include/lib/simplehash.h | 4 ++-- src/include/storage/s_lock.h | 2 +- src/include/tsearch/dicts/spell.h | 2 +- src/interfaces/ecpg/ecpglib/execute.c | 2 +- src/interfaces/ecpg/pgtypeslib/datetime.c | 2 +- src/interfaces/ecpg/pgtypeslib/numeric.c | 4 ++-- src/interfaces/ecpg/preproc/ecpg.header | 2 +- src/interfaces/ecpg/preproc/ecpg.trailer | 8 ++++---- src/interfaces/ecpg/preproc/parse.pl | 2 +- src/interfaces/libpq/fe-auth.c | 2 +- src/interfaces/libpq/libpq-int.h | 4 ++-- src/interfaces/libpq/win32.c | 2 +- src/pl/plperl/ppport.h | 6 +++--- src/pl/plpython/plpy_elog.c | 2 +- src/pl/plpython/plpy_plpymodule.c | 2 +- src/pl/plpython/plpy_typeio.h | 4 ++-- src/test/isolation/specs/receipt-report.spec | 2 +- src/test/isolation/specs/two-ids.spec | 2 +- src/test/regress/expected/alter_table.out | 6 +++--- src/test/regress/expected/create_table.out | 2 +- src/test/regress/expected/indirect_toast.out | 8 ++++---- src/test/regress/expected/init_privs.out | 2 +- src/test/regress/expected/insert_conflict.out | 2 +- src/test/regress/expected/join.out | 2 +- src/test/regress/expected/matview.out | 2 +- src/test/regress/expected/plpgsql.out | 2 +- src/test/regress/expected/replica_identity.out | 2 +- src/test/regress/expected/rolenames.out | 2 +- src/test/regress/expected/rules.out | 2 +- src/test/regress/expected/tsdicts.out | 2 +- src/test/regress/sql/alter_table.sql | 6 +++--- src/test/regress/sql/create_table.sql | 2 +- src/test/regress/sql/indirect_toast.sql | 8 ++++---- src/test/regress/sql/init_privs.sql | 2 +- src/test/regress/sql/insert_conflict.sql | 2 +- src/test/regress/sql/join.sql | 2 +- src/test/regress/sql/matview.sql | 2 +- src/test/regress/sql/plpgsql.sql | 2 +- src/test/regress/sql/replica_identity.sql | 2 +- src/test/regress/sql/rolenames.sql | 2 +- src/test/regress/sql/rules.sql | 2 +- src/test/regress/sql/tsdicts.sql | 2 +- src/test/ssl/ServerSetup.pm | 2 +- 137 files changed, 195 insertions(+), 195 deletions(-) diff --git a/configure b/configure index 9a83f19821..8468417f69 100755 --- a/configure +++ b/configure @@ -7088,7 +7088,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' # When Autoconf chooses install-sh as install program it tries to generate -# a relative path to it in each makefile where it subsitutes it. This clashes +# a relative path to it in each makefile where it substitutes it. This clashes # with our Makefile.global concept. This workaround helps. case $INSTALL in *install-sh*) install_bin='';; @@ -7232,7 +7232,7 @@ fi $as_echo "$MKDIR_P" >&6; } # When Autoconf chooses install-sh as mkdir -p program it tries to generate -# a relative path to it in each makefile where it subsitutes it. This clashes +# a relative path to it in each makefile where it substitutes it. 
This clashes # with our Makefile.global concept. This workaround helps. case $MKDIR_P in *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';; diff --git a/configure.in b/configure.in index 52e4e78471..01b618c931 100644 --- a/configure.in +++ b/configure.in @@ -887,7 +887,7 @@ fi AC_PROG_INSTALL # When Autoconf chooses install-sh as install program it tries to generate -# a relative path to it in each makefile where it subsitutes it. This clashes +# a relative path to it in each makefile where it substitutes it. This clashes # with our Makefile.global concept. This workaround helps. case $INSTALL in *install-sh*) install_bin='';; @@ -900,7 +900,7 @@ AC_PROG_LN_S AC_PROG_AWK AC_PROG_MKDIR_P # When Autoconf chooses install-sh as mkdir -p program it tries to generate -# a relative path to it in each makefile where it subsitutes it. This clashes +# a relative path to it in each makefile where it substitutes it. This clashes # with our Makefile.global concept. This workaround helps. case $MKDIR_P in *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';; diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c index 807da9254e..04abd0f6b6 100644 --- a/contrib/bloom/blvacuum.c +++ b/contrib/bloom/blvacuum.c @@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats, initBloomState(&state, index); /* - * Interate over the pages. We don't care about concurrently added pages, + * Iterate over the pages. We don't care about concurrently added pages, * they can't contain tuples to delete. */ npages = RelationGetNumberOfBlocks(index); diff --git a/contrib/cube/expected/cube.out b/contrib/cube/expected/cube.out index ada54b2885..328b3b5f5d 100644 --- a/contrib/cube/expected/cube.out +++ b/contrib/cube/expected/cube.out @@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube); 5 (1 row) --- Test of cube_ll_coord function (retrieves LL coodinate values) +-- Test of cube_ll_coord function (retrieves LL coordinate values) -- SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); cube_ll_coord @@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3); 0 (1 row) --- Test of cube_ur_coord function (retrieves UR coodinate values) +-- Test of cube_ur_coord function (retrieves UR coordinate values) -- SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); cube_ur_coord diff --git a/contrib/cube/expected/cube_2.out b/contrib/cube/expected/cube_2.out index c58614ef05..1aa5cf2f98 100644 --- a/contrib/cube/expected/cube_2.out +++ b/contrib/cube/expected/cube_2.out @@ -1056,7 +1056,7 @@ SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube); 5 (1 row) --- Test of cube_ll_coord function (retrieves LL coodinate values) +-- Test of cube_ll_coord function (retrieves LL coordinate values) -- SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); cube_ll_coord @@ -1112,7 +1112,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 3); 0 (1 row) --- Test of cube_ur_coord function (retrieves UR coodinate values) +-- Test of cube_ur_coord function (retrieves UR coordinate values) -- SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); cube_ur_coord diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql index a61fba1ea8..58ea3ad811 100644 --- a/contrib/cube/sql/cube.sql +++ b/contrib/cube/sql/cube.sql @@ -256,7 +256,7 @@ SELECT cube_dim('(0,0,0)'::cube); SELECT cube_dim('(42,42,42),(42,42,42)'::cube); SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube); --- Test of cube_ll_coord function (retrieves LL coodinate values) +-- Test of cube_ll_coord 
function (retrieves LL coordinate values) -- SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2); @@ -268,7 +268,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1); SELECT cube_ll_coord('(42,137)'::cube, 2); SELECT cube_ll_coord('(42,137)'::cube, 3); --- Test of cube_ur_coord function (retrieves UR coodinate values) +-- Test of cube_ur_coord function (retrieves UR coordinate values) -- SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1); SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2); diff --git a/contrib/earthdistance/earthdistance--1.1.sql b/contrib/earthdistance/earthdistance--1.1.sql index 657d328ebb..9136a54a7b 100644 --- a/contrib/earthdistance/earthdistance--1.1.sql +++ b/contrib/earthdistance/earthdistance--1.1.sql @@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8 LANGUAGE SQL IMMUTABLE PARALLEL SAFE AS 'SELECT ''6378168''::float8'; --- Astromers may want to change the earth function so that distances will be +-- Astronomers may want to change the earth function so that distances will be -- returned in degrees. To do this comment out the above definition and -- uncomment the one below. Note that doing this will break the regression -- tests. diff --git a/contrib/isn/ISSN.h b/contrib/isn/ISSN.h index 082efcff7c..585f0e2674 100644 --- a/contrib/isn/ISSN.h +++ b/contrib/isn/ISSN.h @@ -23,7 +23,7 @@ * Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103 * 103 / 10 = 10 remainder 3 * Check digit 10 - 3 = 7 - * => 977-1144875-00-7 ?? <- suplemental number (number of the week, month, etc.) + * => 977-1144875-00-7 ?? <- supplemental number (number of the week, month, etc.) * ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...) * * The hyphenation is always in after the four digits of the ISSN code. diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 9e125b83d7..c3c10e14bc 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI) * into bufO using the given hyphenation range TABLE. * Assumes the input string to be used is of only digits. * - * Returns the number of characters acctually hyphenated. + * Returns the number of characters actually hyphenated. */ static unsigned hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2]) @@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK, ean13 *result, } else if (*aux2 == '!' && *(aux2 + 1) == '\0') { - /* the invalid check digit sufix was found, set it */ + /* the invalid check digit suffix was found, set it */ if (!magic) valid = false; magic = true; diff --git a/contrib/ltree/expected/ltree.out b/contrib/ltree/expected/ltree.out index db52069c26..3d5737d41b 100644 --- a/contrib/ltree/expected/ltree.out +++ b/contrib/ltree/expected/ltree.out @@ -1113,7 +1113,7 @@ SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}'; t (1 row) ---exractors +--extractors SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null; ?column? 
---------- diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c index befda1344d..32d9046258 100644 --- a/contrib/ltree/ltxtquery_io.c +++ b/contrib/ltree/ltxtquery_io.c @@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type, char *strval, int lenval, uint16 flag) #define STACKDEPTH 32 /* - * make polish notaion of query + * make polish notation of query */ static int32 makepol(QPRS_STATE *state) diff --git a/contrib/ltree/sql/ltree.sql b/contrib/ltree/sql/ltree.sql index b4f62e3feb..e9f74909a6 100644 --- a/contrib/ltree/sql/ltree.sql +++ b/contrib/ltree/sql/ltree.sql @@ -209,7 +209,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e, a.*}'; SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}'; SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}'; ---exractors +--extractors SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null; SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4'; SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3'; diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c index e4136f9149..e4d057e18e 100644 --- a/contrib/pg_standby/pg_standby.c +++ b/contrib/pg_standby/pg_standby.c @@ -779,7 +779,7 @@ main(int argc, char **argv) { /* * Once we have restored this file successfully we can remove some - * prior WAL files. If this restore fails we musn't remove any + * prior WAL files. If this restore fails we mustn't remove any * file because some of them will be requested again immediately * after the failed restore, or when we restart recovery. */ diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index a65b52968a..62dec8768a 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -139,7 +139,7 @@ typedef struct Counters { int64 calls; /* # of times executed */ double total_time; /* total execution time, in msec */ - double min_time; /* minimim execution time in msec */ + double min_time; /* minimum execution time in msec */ double max_time; /* maximum execution time in msec */ double mean_time; /* mean execution time in msec */ double sum_var_time; /* sum of variances in execution time in msec */ diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c index dd0f492cfa..368e7c8941 100644 --- a/contrib/pg_trgm/trgm_op.c +++ b/contrib/pg_trgm/trgm_op.c @@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v2) * ulen1: count of unique trigrams of array "trg1". * len2: length of array "trg2" and array "trg2indexes". * len: length of the array "found". - * check_only: if true then only check existaince of similar search pattern in + * check_only: if true then only check existence of similar search pattern in * text. * * Returns word similarity. @@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes, lastpos[trgindex] = i; } - /* Adjust lower bound if this trigram is present in required substing */ + /* Adjust lower bound if this trigram is present in required substring */ if (found[trgindex]) { int prev_lower, @@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes, * * str1: search pattern string, of length slen1 bytes. * str2: text in which we are looking for a word, of length slen2 bytes. - * check_only: if true then only check existaince of similar search pattern in + * check_only: if true then only check existence of similar search pattern in * text. * * Returns word similarity. 
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c index 44d9adcd2a..73dbfbd08f 100644 --- a/contrib/pgcrypto/mbuf.c +++ b/contrib/pgcrypto/mbuf.c @@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf) } /* - * caller wants exatly len bytes and dont bother with references + * caller wants exactly len bytes and don't bother with references */ int pullf_read_fixed(PullFilter *src, int len, uint8 *dst) diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c index cb70fcba6c..545009ce19 100644 --- a/contrib/pgcrypto/pgp-mpi-internal.c +++ b/contrib/pgcrypto/pgp-mpi-internal.c @@ -141,7 +141,7 @@ bn_to_mpi(mpz_t *bn) } /* - * Decide the number of bits in the random componont k + * Decide the number of bits in the random component k * * It should be in the same range as p for signing (which * is deprecated), but can be much smaller for encrypting. @@ -149,8 +149,8 @@ bn_to_mpi(mpz_t *bn) * Until I research it further, I just mimic gpg behaviour. * It has a special mapping table, for values <= 5120, * above that it uses 'arbitrary high number'. Following - * algorihm hovers 10-70 bits above gpg values. And for - * larger p, it uses gpg's algorihm. + * algorithm hovers 10-70 bits above gpg values. And for + * larger p, it uses gpg's algorithm. * * The point is - if k gets large, encryption will be * really slow. It does not matter for decryption. diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c index 24484a6c54..afece26918 100644 --- a/contrib/pgcrypto/pgp-mpi-openssl.c +++ b/contrib/pgcrypto/pgp-mpi-openssl.c @@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn) } /* - * Decide the number of bits in the random componont k + * Decide the number of bits in the random component k * * It should be in the same range as p for signing (which * is deprecated), but can be much smaller for encrypting. @@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn) * Until I research it further, I just mimic gpg behaviour. * It has a special mapping table, for values <= 5120, * above that it uses 'arbitrary high number'. Following - * algorihm hovers 10-70 bits above gpg values. And for - * larger p, it uses gpg's algorihm. + * algorithm hovers 10-70 bits above gpg values. And for + * larger p, it uses gpg's algorithm. * * The point is - if k gets large, encryption will be * really slow. It does not matter for decryption. 
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 3a092804a2..0b9e3e4537 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -2057,7 +2057,7 @@ SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM 1 (10 rows) --- non-Var items in targelist of the nullable rel of a join preventing +-- non-Var items in targetlist of the nullable rel of a join preventing -- push-down in some cases -- unable to push {ft1, ft2} EXPLAIN (VERBOSE, COSTS OFF) diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index e19a3ef398..56b01d0490 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -493,7 +493,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10; SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10; --- non-Var items in targelist of the nullable rel of a join preventing +-- non-Var items in targetlist of the nullable rel of a join preventing -- push-down in some cases -- unable to push {ft1, ft2} EXPLAIN (VERBOSE, COSTS OFF) diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c index c6c082b8ea..895d879498 100644 --- a/contrib/seg/seg.c +++ b/contrib/seg/seg.c @@ -888,7 +888,7 @@ restore(char *result, float val, int n) if (Abs(exp) <= 4) { /* - * remove the decimal point from the mantyssa and write the digits + * remove the decimal point from the mantissa and write the digits * to the buf array */ for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++) diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c index 3e2cfab819..7728a18333 100644 --- a/contrib/sepgsql/selinux.c +++ b/contrib/sepgsql/selinux.c @@ -23,7 +23,7 @@ * When we ask SELinux whether the required privileges are allowed or not, * we use security_compute_av(3). It needs us to represent object classes * and access vectors using 'external' codes defined in the security policy. - * It is determinded in the runtime, not build time. So, it needs an internal + * It is determined in the runtime, not build time. So, it needs an internal * service to translate object class/access vectors which we want to check * into the code which kernel want to be given. 
*/ diff --git a/contrib/sepgsql/sql/label.sql b/contrib/sepgsql/sql/label.sql index 04085e57a4..49780b2697 100644 --- a/contrib/sepgsql/sql/label.sql +++ b/contrib/sepgsql/sql/label.sql @@ -206,7 +206,7 @@ SELECT * FROM auth_tbl; -- failed SELECT sepgsql_setcon(NULL); -- end of session SELECT sepgsql_getcon(); --- the pooler cannot touch these tables directry +-- the pooler cannot touch these tables directly SELECT * FROM foo_tbl; -- failed SELECT * FROM var_tbl; -- failed diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c index 78cfedf219..208ff6103d 100644 --- a/contrib/spi/refint.c +++ b/contrib/spi/refint.c @@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS) /* internal error */ elog(ERROR, "check_primary_key: cannot process DELETE events"); - /* If UPDATion the must check new Tuple, not old one */ + /* If UPDATE, then must check new Tuple, not old one */ else tuple = trigdata->tg_newtuple; diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL index 9735c8c57f..7ff1d0e377 100755 --- a/contrib/start-scripts/osx/PostgreSQL +++ b/contrib/start-scripts/osx/PostgreSQL @@ -29,7 +29,7 @@ # modified by Ray Aspeitia 12-03-2003 : # added log rotation script to db startup # modified StartupParameters.plist "Provides" parameter to make it easier to -# start and stop with the SystemStarter utitlity +# start and stop with the SystemStarter utility # use the below command in order to correctly start/stop/restart PG with log rotation script: # SystemStarter [start|stop|restart] PostgreSQL diff --git a/contrib/tsearch2/tsearch2--1.0.sql b/contrib/tsearch2/tsearch2--1.0.sql index a32c5fe85b..68bb43fd7c 100644 --- a/contrib/tsearch2/tsearch2--1.0.sql +++ b/contrib/tsearch2/tsearch2--1.0.sql @@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text) LANGUAGE INTERNAL RETURNS NULL ON NULL INPUT; ---reset - just for debuging +--reset - just for debugging CREATE FUNCTION reset_tsearch() RETURNS void as 'MODULE_PATHNAME', 'tsa_reset_tsearch' diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c index ac28996867..73b74c875e 100644 --- a/contrib/xml2/xpath.c +++ b/contrib/xml2/xpath.c @@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS) /* * At the moment we assume that the returned attributes make sense for the - * XPath specififed (i.e. we trust the caller). It's not fatal if they get + * XPath specified (i.e. we trust the caller). It's not fatal if they get * it wrong - the input function for the column type will raise an error * if the path result can't be converted into the correct binary * representation. 
diff --git a/src/Makefile.shlib b/src/Makefile.shlib index c293a34d1a..35e2dd8690 100644 --- a/src/Makefile.shlib +++ b/src/Makefile.shlib @@ -377,7 +377,7 @@ $(shlib): $(OBJS) $(DLL_DEFFILE) | $(SHLIB_PREREQS) $(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib) endif -endif # PORTNAME == cgywin +endif # PORTNAME == cygwin endif # PORTNAME == cygwin || PORTNAME == win32 diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README index dd4c9fa70a..02228662b8 100644 --- a/src/backend/access/gist/README +++ b/src/backend/access/gist/README @@ -28,7 +28,7 @@ The current implementation of GiST supports: The support for concurrency implemented in PostgreSQL was developed based on the paper "Access Methods for Next-Generation Database Systems" by -Marcel Kornaker: +Marcel Kornacker: http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c index 9430794207..69676eba95 100644 --- a/src/backend/access/hash/hashpage.c +++ b/src/backend/access/hash/hashpage.c @@ -1077,7 +1077,7 @@ _hash_splitbucket_guts(Relation rel, * already moved before the split operation was previously interrupted. * * The caller must hold a pin, but no lock, on the metapage and old bucket's - * primay page buffer. The buffers are returned in the same state. (The + * primary page buffer. The buffers are returned in the same state. (The * metapage is only touched if it becomes necessary to add or remove overflow * pages.) */ diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c index 90ab6f2421..c7b283c198 100644 --- a/src/backend/access/heap/rewriteheap.c +++ b/src/backend/access/heap/rewriteheap.c @@ -209,7 +209,7 @@ typedef struct RewriteMappingFile } RewriteMappingFile; /* - * A single In-Memeory logical rewrite mapping, hanging of + * A single In-Memory logical rewrite mapping, hanging off * RewriteMappingFile->mappings. */ typedef struct RewriteMappingDataEntry diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c index 18a5f5602c..20f60bc023 100644 --- a/src/backend/access/transam/commit_ts.c +++ b/src/backend/access/transam/commit_ts.c @@ -615,7 +615,7 @@ CommitTsParameterChange(bool newvalue, bool oldvalue) /* * Activate this module whenever necessary. - * This must happen during postmaster or standalong-backend startup, + * This must happen during postmaster or standalone-backend startup, * or during WAL replay anytime the track_commit_timestamp setting is * changed in the master. * diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c index f6f136da3a..82f9a3c5c6 100644 --- a/src/backend/access/transam/xact.c +++ b/src/backend/access/transam/xact.c @@ -2752,7 +2752,7 @@ CommitTransactionCommand(void) * These shouldn't happen. TBLOCK_DEFAULT means the previous * StartTransactionCommand didn't set the STARTED state * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended - * by EndParallelWorkerTranaction(), not this function. + * by EndParallelWorkerTransaction(), not this function. 
*/ case TBLOCK_DEFAULT: case TBLOCK_PARALLEL_INPROGRESS: diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 2a38792ed6..a3bb2f1c0e 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -770,7 +770,7 @@ static void getRelationIdentity(StringInfo buffer, Oid relid, List **objname); * * Note: If the object is not found, we don't give any indication of the * reason. (It might have been a missing schema if the name was qualified, or - * an inexistant type name in case of a cast, function or operator; etc). + * a nonexistent type name in case of a cast, function or operator; etc). * Currently there is only one caller that might be interested in such info, so * we don't spend much effort here. If more callers start to care, it might be * better to add some support for that in this function. diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c index 225e6f636c..7e0a9aa0fd 100644 --- a/src/backend/commands/amcmds.c +++ b/src/backend/commands/amcmds.c @@ -34,7 +34,7 @@ static const char *get_am_type_string(char amtype); /* - * CreateAcessMethod + * CreateAccessMethod * Registers a new access method. */ ObjectAddress diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c index 30000a1eeb..1ebacbc24f 100644 --- a/src/backend/commands/dbcommands.c +++ b/src/backend/commands/dbcommands.c @@ -685,7 +685,7 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt) /* * Force synchronous commit, thus minimizing the window between - * creation of the database files and commital of the transaction. If + * creation of the database files and committal of the transaction. If * we crash before committing, we'll have a DB that's taking up disk * space but is not in pg_database, which is not good. */ @@ -955,7 +955,7 @@ dropdb(const char *dbname, bool missing_ok) /* * Force synchronous commit, thus minimizing the window between removal of - * the database files and commital of the transaction. If we crash before + * the database files and committal of the transaction. If we crash before * committing, we'll have a DB that's gone on disk but still there * according to pg_database, which is not good. */ @@ -1309,7 +1309,7 @@ movedb(const char *dbname, const char *tblspcname) /* * Force synchronous commit, thus minimizing the window between - * copying the database files and commital of the transaction. If we + * copying the database files and committal of the transaction. If we * crash before committing, we'll leave an orphaned set of files on * disk, which is not fatal but not good either. */ diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 0a67be031b..c9e0a3e42d 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -3401,7 +3401,7 @@ ExplainYAMLLineStarting(ExplainState *es) } /* - * YAML is a superset of JSON; unfortuantely, the YAML quoting rules are + * YAML is a superset of JSON; unfortunately, the YAML quoting rules are * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything. 
* Empty strings, strings with leading or trailing whitespace, and strings diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c index dd83858b3d..8b1285a542 100644 --- a/src/backend/commands/functioncmds.c +++ b/src/backend/commands/functioncmds.c @@ -1040,7 +1040,7 @@ CreateFunction(ParseState *pstate, CreateFunctionStmt *stmt) } else { - /* store SQL NULL instead of emtpy array */ + /* store SQL NULL instead of empty array */ trftypes = NULL; } @@ -1441,7 +1441,7 @@ CreateCast(CreateCastStmt *stmt) (errcode(ERRCODE_WRONG_OBJECT_TYPE), errmsg("cast will be ignored because the target data type is a domain"))); - /* Detemine the cast method */ + /* Determine the cast method */ if (stmt->func != NULL) castmethod = COERCION_METHOD_FUNCTION; else if (stmt->inout) diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c index ed6136c153..f4814c095b 100644 --- a/src/backend/commands/indexcmds.c +++ b/src/backend/commands/indexcmds.c @@ -99,7 +99,7 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation, * Errors arising from the attribute list still apply. * * Most column type changes that can skip a table rewrite do not invalidate - * indexes. We ackowledge this when all operator classes, collations and + * indexes. We acknowledge this when all operator classes, collations and * exclusion operators match. Though we could further permit intra-opfamily * changes for btree and hash indexes, that adds subtle complexity with no * concrete benefit for core types. @@ -965,7 +965,7 @@ CheckMutability(Expr *expr) * indxpath.c could do something with. However, that seems overly * restrictive. One useful application of partial indexes is to apply * a UNIQUE constraint across a subset of a table, and in that scenario - * any evaluatable predicate will work. So accept any predicate here + * any evaluable predicate will work. So accept any predicate here * (except ones requiring a plan), and let indxpath.c fend for itself. */ static void diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index 3fe1d15052..04f83e0a2e 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -525,7 +525,7 @@ OpenTableList(List *tables) myrelid = RelationGetRelid(rel); /* * filter out duplicates when user specifies "foo, foo" - * Note that this algrithm is know to not be very effective (O(N^2)) + * Note that this algorithm is know to not be very effective (O(N^2)) * but given that it only works on list of tables given to us by user * it's deemed acceptable. */ diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c index 4353e14e1b..ab21e64b48 100644 --- a/src/backend/commands/subscriptioncmds.c +++ b/src/backend/commands/subscriptioncmds.c @@ -474,7 +474,7 @@ DropSubscription(DropSubscriptionStmt *stmt) InvokeObjectDropHook(SubscriptionRelationId, subid, 0); /* - * Lock the subscription so noboby else can do anything with it + * Lock the subscription so nobody else can do anything with it * (including the replication workers). 
*/ LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock); diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 878b48d39e..37a4c4a3d6 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -6630,7 +6630,7 @@ ATAddCheckConstraint(List **wqueue, AlteredTableInfo *tab, Relation rel, /* * Check if ONLY was specified with ALTER TABLE. If so, allow the - * contraint creation only if there are no children currently. Error out + * constraint creation only if there are no children currently. Error out * otherwise. */ if (!recurse && children != NIL) diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index ce6600bde7..a66639178a 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1261,7 +1261,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo, resultRelInfo->ri_projectReturning = NULL; /* - * If partition_root has been specified, that means we are builiding the + * If partition_root has been specified, that means we are building the * ResultRelationInfo for one of its leaf partitions. In that case, we * need *not* initialize the leaf partition's constraint, but rather the * the partition_root's (if any). We must do that explicitly like this, diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c index e01fe6da96..fe87c9ae71 100644 --- a/src/backend/executor/execParallel.c +++ b/src/backend/executor/execParallel.c @@ -533,7 +533,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate, int plan_node_id = planstate->plan->plan_node_id; MemoryContext oldcontext; - /* Find the instumentation for this node. */ + /* Find the instrumentation for this node. */ for (i = 0; i < instrumentation->num_plan_nodes; ++i) if (instrumentation->plan_node_id[i] == plan_node_id) break; diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c index a8bd5832c9..ebf3f6b3c9 100644 --- a/src/backend/executor/execReplication.c +++ b/src/backend/executor/execReplication.c @@ -391,7 +391,7 @@ ExecSimpleRelationInsert(EState *estate, TupleTableSlot *slot) if (rel->rd_att->constr) ExecConstraints(resultRelInfo, slot, slot, estate); - /* Store the slot into tuple that we can insett. */ + /* Store the slot into tuple that we can inspect. */ tuple = ExecMaterializeSlot(slot); /* OK, store the tuple and create index entries for it */ diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c index 5c6079af80..aa08152350 100644 --- a/src/backend/executor/nodeAgg.c +++ b/src/backend/executor/nodeAgg.c @@ -304,7 +304,7 @@ typedef struct AggStatePerTransData /* * Slots for holding the evaluated input arguments. These are set up * during ExecInitAgg() and then used for each input row requiring - * procesessing besides what's done in AggState->evalproj. + * processing besides what's done in AggState->evalproj. 
*/ TupleTableSlot *sortslot; /* current input tuple */ TupleTableSlot *uniqslot; /* used for multi-column DISTINCT */ diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c index 6ac6b83cdd..2a123e8452 100644 --- a/src/backend/executor/nodeWindowAgg.c +++ b/src/backend/executor/nodeWindowAgg.c @@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *winstate, /* * We must track the number of rows included in transValue, since to - * remove the last input, advance_windowaggregate_base() musn't call the + * remove the last input, advance_windowaggregate_base() mustn't call the * inverse transition function, but simply reset transValue back to its * initial value. */ diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c index 95ded23791..be63a4bc63 100644 --- a/src/backend/libpq/hba.c +++ b/src/backend/libpq/hba.c @@ -109,7 +109,7 @@ static MemoryContext parsed_hba_context = NULL; * * NOTE: the IdentLine structs can contain pre-compiled regular expressions * that live outside the memory context. Before destroying or resetting the - * memory context, they need to be expliticly free'd. + * memory context, they need to be explicitly free'd. */ static List *parsed_ident_lines = NIL; static MemoryContext parsed_ident_context = NULL; diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c index 1a43ab7288..023abf70e2 100644 --- a/src/backend/optimizer/geqo/geqo_erx.c +++ b/src/backend/optimizer/geqo/geqo_erx.c @@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene *tour1, Gene *tour2, for (index1 = 0; index1 < num_gene; index1++) { /* - * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton + * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation * maps n back to 1 */ @@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge, Edge *edge_table) /* * give priority to candidates with fewest remaining unused edges; * find out what the minimum number of unused edges is - * (minimum_edges); if there is more than one cadidate with the + * (minimum_edges); if there is more than one candidate with the * minimum number of unused edges keep count of this number * (minimum_count); */ diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c index 7c30ec6fb9..2897245883 100644 --- a/src/backend/optimizer/path/joinpath.c +++ b/src/backend/optimizer/path/joinpath.c @@ -1618,7 +1618,7 @@ select_mergejoin_clauses(PlannerInfo *root, /* * Insist that each side have a non-redundant eclass. This * restriction is needed because various bits of the planner expect - * that each clause in a merge be associatable with some pathkey in a + * that each clause in a merge be associable with some pathkey in a * canonical pathkey list, but redundant eclasses can't appear in * canonical sort orderings. (XXX it might be worth relaxing this, * but not enough time to address it for 8.3.) diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c index e8807591a0..3c58d0596c 100644 --- a/src/backend/optimizer/plan/planmain.c +++ b/src/backend/optimizer/plan/planmain.c @@ -195,7 +195,7 @@ query_planner(PlannerInfo *root, List *tlist, /* * Now distribute "placeholders" to base rels as needed. This has to be * done after join removal because removal could change whether a - * placeholder is evaluatable at a base rel. + * placeholder is evaluable at a base rel. 
*/ add_placeholders_to_base_rels(root); diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c index 6d84477f86..62629ee7d8 100644 --- a/src/backend/optimizer/util/joininfo.c +++ b/src/backend/optimizer/util/joininfo.c @@ -24,7 +24,7 @@ * Detect whether there is a joinclause that involves * the two given relations. * - * Note: the joinclause does not have to be evaluatable with only these two + * Note: the joinclause does not have to be evaluable with only these two * relations. This is intentional. For example consider * SELECT * FROM a, b, c WHERE a.x = (b.y + c.z) * If a is much larger than the other tables, it may be worthwhile to diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c index 8f10520f81..045b5cf539 100644 --- a/src/backend/optimizer/util/restrictinfo.c +++ b/src/backend/optimizer/util/restrictinfo.c @@ -515,7 +515,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo, Relids currentrelids, Relids current_and_outer) { - /* Clause must be evaluatable given available context */ + /* Clause must be evaluable given available context */ if (!bms_is_subset(rinfo->clause_relids, current_and_outer)) return false; diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index a4edea08a3..cf97be512d 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -11312,7 +11312,7 @@ table_ref: relation_expr opt_alias_clause n->lateral = true; n->subquery = $2; n->alias = $3; - /* same coment as above */ + /* same comment as above */ if ($3 == NULL) { if (IsA($2, SelectStmt) && diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c index 0e4e7a8c80..8d1939445b 100644 --- a/src/backend/parser/parse_utilcmd.c +++ b/src/backend/parser/parse_utilcmd.c @@ -3050,7 +3050,7 @@ transformAttachPartition(CreateStmtContext *cxt, PartitionCmd *cmd) errmsg("\"%s\" is not partitioned", RelationGetRelationName(parentRel)))); - /* tranform the values */ + /* transform the values */ Assert(RelationGetPartitionKey(parentRel) != NULL); cxt->partbound = transformPartitionBound(cxt->pstate, parentRel, cmd->bound); diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c index 40819824ad..dcb4cf249c 100644 --- a/src/backend/postmaster/bgwriter.c +++ b/src/backend/postmaster/bgwriter.c @@ -211,7 +211,7 @@ BackgroundWriterMain(void) /* Flush any leaked data in the top-level context */ MemoryContextResetAndDeleteChildren(bgwriter_context); - /* re-initilialize to avoid repeated errors causing problems */ + /* re-initialize to avoid repeated errors causing problems */ WritebackContextInit(&wb_context, &bgwriter_flush_after); /* Now we can allow interrupts again */ diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 91ccbe78c0..271c492000 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -5156,7 +5156,7 @@ RandomCancelKey(int32 *cancel_key) } /* - * Count up number of child processes of specified types (dead_end chidren + * Count up number of child processes of specified types (dead_end children * are always excluded). 
*/ static int diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c index d222cff708..e9ce061e83 100644 --- a/src/backend/replication/logical/launcher.c +++ b/src/backend/replication/logical/launcher.c @@ -170,7 +170,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, /* * Worker started and attached to our shmem. This check is safe - * because only laucher ever starts the workers, so nobody can steal + * because only launcher ever starts the workers, so nobody can steal * the worker slot. */ if (status == BGWH_STARTED && worker->proc) @@ -180,7 +180,7 @@ WaitForReplicationWorkerAttach(LogicalRepWorker *worker, return false; /* - * We need timeout because we generaly don't get notified via latch + * We need timeout because we generally don't get notified via latch * about the worker attach. */ rc = WaitLatch(MyLatch, @@ -533,7 +533,7 @@ AtCommit_ApplyLauncher(void) /* * Request wakeup of the launcher on commit of the transaction. * - * This is used to send launcher signal to stop sleeping and proccess the + * This is used to send launcher signal to stop sleeping and process the * subscriptions when current transaction commits. Should be used when new * tuple was added to the pg_subscription catalog. */ @@ -638,7 +638,7 @@ ApplyLauncherMain(Datum main_arg) else { /* - * The wait in previous cycle was interruped in less than + * The wait in previous cycle was interrupted in less than * wal_retrieve_retry_interval since last worker was started, * this usually means crash of the worker, so we should retry * in wal_retrieve_retry_interval again. diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c index ade80d407f..bf84c68a0c 100644 --- a/src/backend/replication/logical/origin.c +++ b/src/backend/replication/logical/origin.c @@ -1250,7 +1250,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS) * Return the replication progress for origin setup in the current session. * * If 'flush' is set to true it is ensured that the returned value corresponds - * to a local transaction that has been flushed. this is useful if asychronous + * to a local transaction that has been flushed. this is useful if asynchronous * commits are used when replaying replicated transactions. */ Datum @@ -1336,7 +1336,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS) * Return the replication progress for an individual replication origin. * * If 'flush' is set to true it is ensured that the returned value corresponds - * to a local transaction that has been flushed. this is useful if asychronous + * to a local transaction that has been flushed. this is useful if asynchronous * commits are used when replaying replicated transactions. */ Datum diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c index 1f30de606a..142cd993cd 100644 --- a/src/backend/replication/logical/proto.c +++ b/src/backend/replication/logical/proto.c @@ -539,7 +539,7 @@ logicalrep_write_attrs(StringInfo out, Relation rel) if (att->attisdropped) continue; - /* REPLICA IDENTITY FULL means all colums are sent as part of key. */ + /* REPLICA IDENTITY FULL means all columns are sent as part of key. 
*/ if (replidentfull || bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber, idattrs)) diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index d805ef4fb7..7dc97fa796 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -1714,7 +1714,7 @@ ReorderBufferCommit(ReorderBuffer *rb, TransactionId xid, * * NB: Transactions handled here have to have actively aborted (i.e. have * produced an abort record). Implicitly aborted transactions are handled via - * ReorderBufferAbortOld(); transactions we're just not interesteded in, but + * ReorderBufferAbortOld(); transactions we're just not interested in, but * which have committed are handled in ReorderBufferForget(). * * This function purges this transaction and its contents from memory and @@ -1782,7 +1782,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb, TransactionId oldestRunningXid) * toplevel xid. * * This is significantly different to ReorderBufferAbort() because - * transactions that have committed need to be treated differenly from aborted + * transactions that have committed need to be treated differently from aborted * ones since they may have modified the catalog. * * Note that this is only allowed to be called in the moment a transaction @@ -2660,7 +2660,7 @@ StartupReorderBuffer(void) /* * ok, has to be a surviving logical slot, iterate and delete - * everythign starting with xid-* + * everything starting with xid-* */ sprintf(path, "pg_replslot/%s", logical_de->d_name); diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c index 1e02aa9bd8..62020b6ed0 100644 --- a/src/backend/replication/logical/snapbuild.c +++ b/src/backend/replication/logical/snapbuild.c @@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *builder, TransactionId xid) if (builder->snapshot == NULL) { builder->snapshot = SnapBuildBuildSnapshot(builder, xid); - /* inrease refcount for the snapshot builder */ + /* increase refcount for the snapshot builder */ SnapBuildSnapIncRefcount(builder->snapshot); } @@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builder, TransactionId xid, XLogRecPtr lsn) if (builder->snapshot == NULL) { builder->snapshot = SnapBuildBuildSnapshot(builder, xid); - /* inrease refcount for the snapshot builder */ + /* increase refcount for the snapshot builder */ SnapBuildSnapIncRefcount(builder->snapshot); } @@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLogRecPtr lsn, TransactionId xid) { /* * None of the originally running transaction is running anymore, - * so our incrementaly built snapshot now is consistent. + * so our incrementally built snapshot now is consistent. */ ereport(LOG, (errmsg("logical decoding found consistent point at %X/%X", diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c index 9383960da7..0b19feca40 100644 --- a/src/backend/replication/logical/worker.c +++ b/src/backend/replication/logical/worker.c @@ -327,7 +327,7 @@ slot_store_cstrings(TupleTableSlot *slot, LogicalRepRelMapEntry *rel, /* * Modify slot with user data provided as C strigs. * This is somewhat similar to heap_modify_tuple but also calls the type - * input fuction on the user data as the input is the text representation + * input function on the user data as the input is the text representation * of the types. 
*/ static void diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c index 08c30af88a..0ceb4be375 100644 --- a/src/backend/replication/pgoutput/pgoutput.c +++ b/src/backend/replication/pgoutput/pgoutput.c @@ -172,7 +172,7 @@ pgoutput_startup(LogicalDecodingContext * ctx, OutputPluginOptions *opt, &data->protocol_version, &data->publication_names); - /* Check if we support requested protol */ + /* Check if we support requested protocol */ if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), @@ -424,7 +424,7 @@ publication_invalidation_cb(Datum arg, int cacheid, uint32 hashvalue) /* * Initialize the relation schema sync cache for a decoding session. * - * The hash table is destoyed at the end of a decoding session. While + * The hash table is destroyed at the end of a decoding session. While * relcache invalidations still exist and will still be invoked, they * will just see the null hash table global and take no action. */ @@ -540,7 +540,7 @@ rel_sync_cache_relation_cb(Datum arg, Oid relid) /* * We can get here if the plugin was used in SQL interface as the - * RelSchemaSyncCache is detroyed when the decoding finishes, but there + * RelSchemaSyncCache is destroyed when the decoding finishes, but there * is no way to unregister the relcache invalidation callback. */ if (RelationSyncCache == NULL) @@ -580,7 +580,7 @@ rel_sync_cache_publication_cb(Datum arg, int cacheid, uint32 hashvalue) /* * We can get here if the plugin was used in SQL interface as the - * RelSchemaSyncCache is detroyed when the decoding finishes, but there + * RelSchemaSyncCache is destroyed when the decoding finishes, but there * is no way to unregister the relcache invalidation callback. */ if (RelationSyncCache == NULL) diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c index d45a41d863..0079ba567f 100644 --- a/src/backend/storage/ipc/latch.c +++ b/src/backend/storage/ipc/latch.c @@ -860,7 +860,7 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event) * reached. At most nevents occurred events are returned. * * If timeout = -1, block until an event occurs; if 0, check sockets for - * readiness, but don't block; if > 0, block for at most timeout miliseconds. + * readiness, but don't block; if > 0, block for at most timeout milliseconds. * * Returns the number of events occurred, or 0 if the timeout was reached. * diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index 32b4d3d5d4..f5bf807cd6 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -501,7 +501,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait) * it will point to a temporary buffer. This mostly avoids data copying in * the hoped-for case where messages are short compared to the buffer size, * while still allowing longer messages. In either case, the return value - * remains valid until the next receive operation is perfomed on the queue. + * remains valid until the next receive operation is performed on the queue. * * When nowait = false, we'll wait on our process latch when the ring buffer * is empty and we have not yet received a full message. The sender will diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c index 6532240dd1..6259070722 100644 --- a/src/backend/storage/ipc/standby.c +++ b/src/backend/storage/ipc/standby.c @@ -967,7 +967,7 @@ LogStandbySnapshot(void) * similar. 
We keep them separate because xl_xact_running_xacts is a * contiguous chunk of memory and never exists fully until it is assembled in * WAL. The inserted records are marked as not being important for durability, - * to avoid triggering superflous checkpoint / archiving activity. + * to avoid triggering superfluous checkpoint / archiving activity. */ static XLogRecPtr LogCurrentRunningXacts(RunningTransactions CurrRunningXacts) diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index e9703f1866..ad64a79fa1 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -2778,7 +2778,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode) vxids = (VirtualTransactionId *) palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1)); - /* Compute hash code and partiton lock, and look up conflicting modes. */ + /* Compute hash code and partition lock, and look up conflicting modes. */ hashcode = LockTagHashCode(locktag); partitionLock = LockHashPartitionLock(hashcode); conflictMask = lockMethodTable->conflictTab[lockmode]; diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index c196bb8205..ab81d94b51 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -781,7 +781,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode) return false; } else - return true; /* someobdy else has the lock */ + return true; /* somebody else has the lock */ } } pg_unreachable(); @@ -953,7 +953,7 @@ LWLockWakeup(LWLock *lock) * that happens before the list unlink happens, the list would end up * being corrupted. * - * The barrier pairs with the LWLockWaitListLock() when enqueueing for + * The barrier pairs with the LWLockWaitListLock() when enqueuing for * another lock. */ pg_write_barrier(); @@ -1029,7 +1029,7 @@ LWLockDequeueSelf(LWLock *lock) /* * Can't just remove ourselves from the list, but we need to iterate over - * all entries as somebody else could have unqueued us. + * all entries as somebody else could have dequeued us. */ proclist_foreach_modify(iter, &lock->waiters, lwWaitLink) { diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c index 9183764ca7..7aa719d612 100644 --- a/src/backend/storage/lmgr/predicate.c +++ b/src/backend/storage/lmgr/predicate.c @@ -3193,7 +3193,7 @@ ReleasePredicateLocks(bool isCommit) /* * We can't trust XactReadOnly here, because a transaction which started * as READ WRITE can show as READ ONLY later, e.g., within - * substransactions. We want to flag a transaction as READ ONLY if it + * subtransactions. We want to flag a transaction as READ ONLY if it * commits without writing so that de facto READ ONLY transactions get the * benefit of some RO optimizations, so we will use this local variable to * get some cleanup logic right which is based on whether the transaction diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c index 1d9384ef91..6c17b54f0d 100644 --- a/src/backend/storage/smgr/md.c +++ b/src/backend/storage/smgr/md.c @@ -1728,7 +1728,7 @@ _fdvec_resize(SMgrRelation reln, else { /* - * It doesn't seem worthwile complicating the code by having a more + * It doesn't seem worthwhile complicating the code by having a more * aggressive growth strategy here; the number of segments doesn't * grow that fast, and the memory context internally will sometimes * avoid doing an actual reallocation. 
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c index 1c1b04c49e..c1e194a8f5 100644 --- a/src/backend/tsearch/spell.c +++ b/src/backend/tsearch/spell.c @@ -37,7 +37,7 @@ * Spell field. The AffixData field is initialized if AF parameter is not * defined. * - NISortAffixes(): - * - builds a list of compond affixes from the affix list and stores it + * - builds a list of compound affixes from the affix list and stores it * in the CompoundAffix. * - builds prefix trees (Trie) from the affix list for prefixes and suffixes * and stores them in Suffix and Prefix fields. diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c index e0c9ffb7f4..b612fb0e2c 100644 --- a/src/backend/tsearch/ts_parse.c +++ b/src/backend/tsearch/ts_parse.c @@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem) if (ld->curDictId == InvalidOid) { /* - * usial mode: dictionary wants only one word, but we should keep in + * usual mode: dictionary wants only one word, but we should keep in * mind that we should go through all stack */ @@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **correspondLexem) /* * We should be sure that current type of lexeme is recognized - * by our dictinonary: we just check is it exist in list of + * by our dictionary: we just check is it exist in list of * dictionaries ? */ for (i = 0; i < map->len && !dictExists; i++) @@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs) /* start of a new fragment */ infrag = 1; numfragments++; - /* add a fragment delimitor if this is after the first one */ + /* add a fragment delimiter if this is after the first one */ if (numfragments > 1) { memcpy(ptr, prs->fragdelim, prs->fragdelimlen); diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c index 6586760f15..bb7edc1516 100644 --- a/src/backend/tsearch/wparser_def.c +++ b/src/backend/tsearch/wparser_def.c @@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, TSQuery query, int highlight, break; } if (curlen < min_words && i >= prs->curwords) - { /* got end of text and our cover is shoter + { /* got end of text and our cover is shorter * than min_words */ for (i = p - 1; i >= 0; i--) { diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c index 16a7954ec4..4f3d8a1189 100644 --- a/src/backend/utils/adt/formatting.c +++ b/src/backend/utils/adt/formatting.c @@ -2265,7 +2265,7 @@ seq_search(char *name, const char *const * array, int type, int max, int *len) for (last = 0, a = array; *a != NULL; a++) { - /* comperate first chars */ + /* compare first chars */ if (*name != **a) continue; diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c b/src/backend/utils/adt/rangetypes_selfuncs.c index cf6ecc4cf3..2997edd672 100644 --- a/src/backend/utils/adt/rangetypes_selfuncs.c +++ b/src/backend/utils/adt/rangetypes_selfuncs.c @@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *typcache, VariableStatData *vardata, { /* * Lower bound no longer matters. 
Just estimate the fraction - * with an upper bound <= const uppert bound + * with an upper bound <= const upper bound */ hist_selec = calc_hist_selectivity_scalar(typcache, &const_upper, diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index f26175ec44..f355954b53 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -2687,7 +2687,7 @@ is_input_argument(int nth, const char *argmodes) } /* - * Append used transformated types to specified buffer + * Append used transformed types to specified buffer */ static void print_function_trftypes(StringInfo buf, HeapTuple proctup) diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c index 9b2cd6df41..76e5e541b6 100644 --- a/src/backend/utils/adt/tsrank.c +++ b/src/backend/utils/adt/tsrank.c @@ -899,7 +899,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method) /* * if doc are big enough then ext.q may be equal to ext.p due to limit - * of posional information. In this case we approximate number of + * of positional information. In this case we approximate number of * noise word as half cover's length */ nNoise = (ext.q - ext.p) - (ext.end - ext.begin); @@ -908,7 +908,7 @@ calc_rank_cd(const float4 *arrdata, TSVector txt, TSQuery query, int method) Wdoc += Cpos / ((double) (1 + nNoise)); CurExtPos = ((double) (ext.q + ext.p)) / 2.0; - if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent devision by + if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent division by * zero in a case of multiple lexize */ ) SumDist += 1.0 / (CurExtPos - PrevExtPos); diff --git a/src/backend/utils/adt/windowfuncs.c b/src/backend/utils/adt/windowfuncs.c index 4e714cd5bf..d86ad703da 100644 --- a/src/backend/utils/adt/windowfuncs.c +++ b/src/backend/utils/adt/windowfuncs.c @@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS) /* * lag_with_offset - * returns the value of VE evelulated on a row that is OFFSET + * returns the value of VE evaluated on a row that is OFFSET * rows before the current row within a partition, * per spec. */ diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c index 8a7c560e46..4dd2e2b2c6 100644 --- a/src/backend/utils/cache/relcache.c +++ b/src/backend/utils/cache/relcache.c @@ -1433,7 +1433,7 @@ RelationInitPhysicalAddr(Relation relation) * points to the current file since the older file will be gone (or * truncated). The new file will still contain older rows so lookups * in them will work correctly. This wouldn't work correctly if - * rewrites were allowed to change the schema in a noncompatible way, + * rewrites were allowed to change the schema in an incompatible way, * but those are prevented both on catalog tables and on user tables * declared as additional catalog tables. */ diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c index c55da54878..af08f102fe 100644 --- a/src/backend/utils/fmgr/funcapi.c +++ b/src/backend/utils/fmgr/funcapi.c @@ -879,7 +879,7 @@ get_func_arg_info(HeapTuple procTup, /* * get_func_trftypes * - * Returns a number of transformated types used by function. + * Returns the number of transformed types used by function. 
*/ int get_func_trftypes(HeapTuple procTup, diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 4d0a2a7bed..9f938f2d27 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid roleid) relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock); - /* read all the settings under the same snapsot for efficiency */ + /* read all the settings under the same snapshot for efficiency */ snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId)); /* Later settings are ignored if set earlier. */ diff --git a/src/backend/utils/misc/Makefile b/src/backend/utils/misc/Makefile index 0ad1b8b595..45cdf76ec2 100644 --- a/src/backend/utils/misc/Makefile +++ b/src/backend/utils/misc/Makefile @@ -19,7 +19,7 @@ OBJS = backend_random.o guc.o help_config.o pg_config.o pg_controldata.o \ tzparser.o # This location might depend on the installation directories. Therefore -# we can't subsitute it into pg_config.h. +# we can't substitute it into pg_config.h. ifdef krb_srvtab override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"' endif diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c index 230756e0cd..2cd758178c 100644 --- a/src/backend/utils/mmgr/freepage.c +++ b/src/backend/utils/mmgr/freepage.c @@ -318,7 +318,7 @@ sum_free_pages(FreePageManager *fpm) /* * Compute the size of the largest run of pages that the user could - * succesfully get. + * successfully get. */ static Size FreePageManagerLargestContiguous(FreePageManager *fpm) @@ -360,7 +360,7 @@ FreePageManagerLargestContiguous(FreePageManager *fpm) /* * Recompute the size of the largest run of pages that the user could - * succesfully get, if it has been marked dirty. + * successfully get, if it has been marked dirty. */ static void FreePageManagerUpdateLargest(FreePageManager *fpm) @@ -1704,7 +1704,7 @@ FreePageManagerPutInternal(FreePageManager *fpm, Size first_page, Size npages, * The act of allocating pages for use in constructing our btree * should never cause any page to become more full, so the new * split depth should be no greater than the old one, and perhaps - * less if we fortutiously allocated a chunk that freed up a slot + * less if we fortuitously allocated a chunk that freed up a slot * on the page we need to update. */ Assert(result.split_pages <= fpm->btree_recycle_count); diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c index 053a6d1c70..703bdcedaf 100644 --- a/src/backend/utils/time/tqual.c +++ b/src/backend/utils/time/tqual.c @@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple) } /* - * check whether the transaciont id 'xid' is in the pre-sorted array 'xip'. + * check whether the transaction id 'xid' is in the pre-sorted array 'xip'. */ static bool TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num) diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c index 5388c08b29..5737608f9e 100644 --- a/src/bin/pg_dump/pg_backup_custom.c +++ b/src/bin/pg_dump/pg_backup_custom.c @@ -198,7 +198,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH) * * Optional. * - * Set up extrac format-related TOC data. + * Set up extract format-related TOC data. 
*/ static void _ArchiveEntry(ArchiveHandle *AH, TocEntry *te) diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 35ac05e851..4d22802912 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -3500,7 +3500,7 @@ getPublicationTables(Archive *fout, TableInfo tblinfo[], int numTables) resetPQExpBuffer(query); - /* Get the publication memebership for the table. */ + /* Get the publication membership for the table. */ appendPQExpBuffer(query, "SELECT pr.tableoid, pr.oid, p.pubname " "FROM pg_catalog.pg_publication_rel pr," diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c index 6e3acdc416..5349c39411 100644 --- a/src/bin/psql/common.c +++ b/src/bin/psql/common.c @@ -828,7 +828,7 @@ StoreQueryTuple(const PGresult *result) char *varname; char *value; - /* concate prefix and column name */ + /* concatenate prefix and column name */ varname = psprintf("%s%s", pset.gset_prefix, colname); if (!PQgetisnull(result, 0, i)) diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index c501168d8c..e2e4cbcc08 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -2127,7 +2127,7 @@ describeOneTableDetails(const char *schemaname, printTableAddFooter(&cont, _("Check constraints:")); for (i = 0; i < tuples; i++) { - /* untranslated contraint name and def */ + /* untranslated constraint name and def */ printfPQExpBuffer(&buf, " \"%s\" %s", PQgetvalue(result, i, 0), PQgetvalue(result, i, 1)); @@ -3197,7 +3197,7 @@ listTables(const char *tabtypes, const char *pattern, bool verbose, bool showSys if (verbose) { /* - * As of PostgreSQL 9.0, use pg_table_size() to show a more acurate + * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate * size of a table, including FSM, VM and TOAST tables. */ if (pset.sversion >= 90000) @@ -5108,7 +5108,7 @@ describeSubscriptions(const char *pattern, bool verbose) gettext_noop("Conninfo")); } - /* Only display subscritpions in current database. */ + /* Only display subscriptions in current database. */ appendPQExpBufferStr(&buf, "FROM pg_catalog.pg_subscription\n" "WHERE subdbid = (SELECT oid\n" diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h index 7a237d7569..a3796f2902 100644 --- a/src/include/access/visibilitymap.h +++ b/src/include/access/visibilitymap.h @@ -26,7 +26,7 @@ #define VISIBILITYMAP_ALL_VISIBLE 0x01 #define VISIBILITYMAP_ALL_FROZEN 0x02 #define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid - * visiblitymap flags bits */ + * visibilitymap flags bits */ /* Macros for visibilitymap test */ #define VM_ALL_VISIBLE(r, b, v) \ diff --git a/src/include/access/xact.h b/src/include/access/xact.h index 4df6529ea0..e7d11913d1 100644 --- a/src/include/access/xact.h +++ b/src/include/access/xact.h @@ -65,7 +65,7 @@ typedef enum * apply */ } SyncCommitLevel; -/* Define the default setting for synchonous_commit */ +/* Define the default setting for synchronous_commit */ #define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH /* Synchronous commit level */ diff --git a/src/include/c.h b/src/include/c.h index a2c043adfb..91e5baa969 100644 --- a/src/include/c.h +++ b/src/include/c.h @@ -989,7 +989,7 @@ typedef NameData *Name; /* gettext domain name mangling */ /* - * To better support parallel installations of major PostgeSQL + * To better support parallel installations of major PostgreSQL * versions as well as parallel installations of major library soname * versions, we mangle the gettext domain name by appending those * version numbers. 
The coding rule ought to be that wherever the diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h index 93cb1686bd..b195d1a5ab 100644 --- a/src/include/catalog/partition.h +++ b/src/include/catalog/partition.h @@ -41,7 +41,7 @@ typedef struct PartitionDescData *PartitionDesc; /*----------------------- * PartitionDispatch - information about one partitioned table in a partition - * hiearchy required to route a tuple to one of its partitions + * hierarchy required to route a tuple to one of its partitions * * reldesc Relation descriptor of the table * key Partition key information of the table diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h index cf30bf90db..75b618accd 100644 --- a/src/include/catalog/pg_subscription.h +++ b/src/include/catalog/pg_subscription.h @@ -23,7 +23,7 @@ #define SubscriptionRelation_Rowtype_Id 6101 /* - * Technicaly, the subscriptions live inside the database, so a shared catalog + * Technically, the subscriptions live inside the database, so a shared catalog * seems weird, but the replication launcher process needs to access all of * them to be able to start the workers, so we have to put them in a shared, * nailed catalog. @@ -35,7 +35,7 @@ CATALOG(pg_subscription,6100) BKI_SHARED_RELATION BKI_ROWTYPE_OID(6101) BKI_SCHE Oid subowner; /* Owner of the subscription */ - bool subenabled; /* True if the subsription is enabled + bool subenabled; /* True if the subscription is enabled * (the worker should be running) */ #ifdef CATALOG_VARLEN /* variable-length fields start here */ @@ -65,7 +65,7 @@ typedef FormData_pg_subscription *Form_pg_subscription; typedef struct Subscription { Oid oid; /* Oid of the subscription */ - Oid dbid; /* Oid of the database which dubscription is in */ + Oid dbid; /* Oid of the database which subscription is in */ char *name; /* Name of the subscription */ Oid owner; /* Oid of the subscription owner */ bool enabled; /* Indicates if the subscription is enabled */ diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h index 12aedbc384..72e18499c0 100644 --- a/src/include/lib/simplehash.h +++ b/src/include/lib/simplehash.h @@ -345,7 +345,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize) * we need. We neither want tb->members increased, nor do we need to do * deal with deleted elements, nor do we need to compare keys. So a * special-cased implementation is lot faster. As resizing can be time - * consuming and frequent, that's worthwile to optimize. + * consuming and frequent, that's worthwhile to optimize. * * To be able to simply move entries over, we have to start not at the * first bucket (i.e olddata[0]), but find the first bucket that's either @@ -620,7 +620,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key) /* * Backward shift following elements till either an empty element - * or an element at its optimal position is encounterered. + * or an element at its optimal position is encountered. * * While that sounds expensive, the average chain length is short, * and deletions would otherwise require toombstones. diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h index f46cd49bab..1ac56ccbb1 100644 --- a/src/include/storage/s_lock.h +++ b/src/include/storage/s_lock.h @@ -842,7 +842,7 @@ typedef LONG slock_t; #define SPIN_DELAY() spin_delay() /* If using Visual C++ on Win64, inline assembly is unavailable. - * Use a _mm_pause instrinsic instead of rep nop. + * Use a _mm_pause intrinsic instead of rep nop. 
*/ #if defined(_WIN64) static __forceinline void diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h index 8cba645540..3032d0b508 100644 --- a/src/include/tsearch/dicts/spell.h +++ b/src/include/tsearch/dicts/spell.h @@ -147,7 +147,7 @@ typedef struct } CMPDAffix; /* - * Type of encoding affix flags in Hunspel dictionaries + * Type of encoding affix flags in Hunspell dictionaries */ typedef enum { diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c index a3ae92eb35..d5a463d940 100644 --- a/src/interfaces/ecpg/ecpglib/execute.c +++ b/src/interfaces/ecpg/ecpglib/execute.c @@ -2,7 +2,7 @@ /* * The aim is to get a simpler interface to the database routines. - * All the tidieous messing around with tuples is supposed to be hidden + * All the tedious messing around with tuples is supposed to be hidden * by this function. */ /* Author: Linus Tolke diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c index 3b0855f722..7216b432d4 100644 --- a/src/interfaces/ecpg/pgtypeslib/datetime.c +++ b/src/interfaces/ecpg/pgtypeslib/datetime.c @@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const char *fmtstring, char *outbuf) * * function works as follows: * - first we analyze the parameters - * - if this is a special case with no delimiters, add delimters + * - if this is a special case with no delimiters, add delimiters * - find the tokens. First we look for numerical values. If we have found * less than 3 tokens, we check for the months' names and thereafter for * the abbreviations of the months' names. diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c index 120794550d..a93d074de2 100644 --- a/src/interfaces/ecpg/pgtypeslib/numeric.c +++ b/src/interfaces/ecpg/pgtypeslib/numeric.c @@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeric *var2) { /* use cmp_abs function to calculate the result */ - /* both are positive: normal comparation with cmp_abs */ + /* both are positive: normal comparison with cmp_abs */ if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS) return cmp_abs(var1, var2); - /* both are negative: return the inverse of the normal comparation */ + /* both are negative: return the inverse of the normal comparison */ if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG) { /* diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header index 672f0b45d4..2562366bbe 100644 --- a/src/interfaces/ecpg/preproc/ecpg.header +++ b/src/interfaces/ecpg/preproc/ecpg.header @@ -207,7 +207,7 @@ create_questionmarks(char *name, bool array) /* In case we have a struct, we have to print as many "?" as there are attributes in the struct * An array is only allowed together with an element argument - * This is essantially only used for inserts, but using a struct as input parameter is an error anywhere else + * This is essentially only used for inserts, but using a struct as input parameter is an error anywhere else * so we don't have to worry here. 
*/ if (p->type->type == ECPGt_struct || (array && p->type->type == ECPGt_array && p->type->u.element->type == ECPGt_struct)) diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer index 31e765ccd3..1c108795de 100644 --- a/src/interfaces/ecpg/preproc/ecpg.trailer +++ b/src/interfaces/ecpg/preproc/ecpg.trailer @@ -355,7 +355,7 @@ ECPGExecuteImmediateStmt: EXECUTE IMMEDIATE execstring $$ = $3; }; /* - * variable decalartion outside exec sql declare block + * variable declaration outside exec sql declare block */ ECPGVarDeclaration: single_vt_declaration; @@ -707,7 +707,7 @@ struct_union_type_with_symbol: s_struct_union_symbol free(forward_name); forward_name = NULL; - /* This is essantially a typedef but needs the keyword struct/union as well. + /* This is essentially a typedef but needs the keyword struct/union as well. * So we create the typedef for each struct definition with symbol */ for (ptr = types; ptr != NULL; ptr = ptr->next) { @@ -1275,7 +1275,7 @@ descriptor_item: SQL_CARDINALITY { $$ = ECPGd_cardinality; } ; /* - * set/reset the automatic transaction mode, this needs a differnet handling + * set/reset the automatic transaction mode, this needs a different handling * as the other set commands */ ECPGSetAutocommit: SET SQL_AUTOCOMMIT '=' on_off { $$ = $4; } @@ -1287,7 +1287,7 @@ on_off: ON { $$ = mm_strdup("on"); } ; /* - * set the actual connection, this needs a differnet handling as the other + * set the actual connection, this needs a different handling as the other * set commands */ ECPGSetConnection: SET CONNECTION TO connection_object { $$ = $4; } diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl index ea661d3694..8a401304ec 100644 --- a/src/interfaces/ecpg/preproc/parse.pl +++ b/src/interfaces/ecpg/preproc/parse.pl @@ -550,7 +550,7 @@ sub dump_fields if ($len == 1) { - # Straight assignement + # Straight assignment $str = ' $$ = ' . $flds_new[0] . ';'; add_to_buffer('rules', $str); } diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c index 2845d3b9d2..b47a16e3d0 100644 --- a/src/interfaces/libpq/fe-auth.c +++ b/src/interfaces/libpq/fe-auth.c @@ -803,7 +803,7 @@ pg_fe_getauthname(PQExpBuffer errorMessage) * be sent in cleartext if it is encrypted on the client side. This is * good because it ensures the cleartext password won't end up in logs, * pg_stat displays, etc. We export the function so that clients won't - * be dependent on low-level details like whether the enceyption is MD5 + * be dependent on low-level details like whether the encryption is MD5 * or something else. * * Arguments are the cleartext password, and the SQL name of the user it diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h index c655388864..e9b73a925e 100644 --- a/src/interfaces/libpq/libpq-int.h +++ b/src/interfaces/libpq/libpq-int.h @@ -309,7 +309,7 @@ typedef struct pg_conn_host char *host; /* host name or address, or socket path */ pg_conn_host_type type; /* type of host */ char *port; /* port number for this host; if not NULL, - * overrrides the PGConn's pgport */ + * overrides the PGConn's pgport */ char *password; /* password for this host, read from the * password file. only set if the PGconn's * pgpass field is NULL. 
*/ @@ -666,7 +666,7 @@ extern void pq_reset_sigpipe(sigset_t *osigset, bool sigpipe_pending, #endif /* - * The SSL implementatation provides these functions (fe-secure-openssl.c) + * The SSL implementation provides these functions (fe-secure-openssl.c) */ extern void pgtls_init_library(bool do_ssl, int do_crypto); extern int pgtls_init(PGconn *conn); diff --git a/src/interfaces/libpq/win32.c b/src/interfaces/libpq/win32.c index d6ecca3859..f99f9a8cdb 100644 --- a/src/interfaces/libpq/win32.c +++ b/src/interfaces/libpq/win32.c @@ -32,7 +32,7 @@ #include "win32.h" -/* Declared here to avoid pulling in all includes, which causes name collissions */ +/* Declared here to avoid pulling in all includes, which causes name collisions */ #ifdef ENABLE_NLS extern char *libpq_gettext(const char *msgid) pg_attribute_format_arg(1); #else diff --git a/src/pl/plperl/ppport.h b/src/pl/plperl/ppport.h index 5ea0c66e98..31d06cb3b0 100644 --- a/src/pl/plperl/ppport.h +++ b/src/pl/plperl/ppport.h @@ -79,7 +79,7 @@ to be installed on your system. If this option is given, a copy of each file will be saved with the given suffix that contains the suggested changes. This does not require any external programs. Note that this does not -automagially add a dot between the original filename and the +automagically add a dot between the original filename and the suffix. If you want the dot, you have to include it in the option argument. @@ -4364,9 +4364,9 @@ DPPP_(my_vload_module)(U32 flags, SV *name, SV *ver, va_list *args) OP * const modname = newSVOP(OP_CONST, 0, name); /* 5.005 has a somewhat hacky force_normal that doesn't croak on - SvREADONLY() if PL_compling is true. Current perls take care in + SvREADONLY() if PL_compiling is true. Current perls take care in ck_require() to correctly turn off SvREADONLY before calling - force_normal_flags(). This seems a better fix than fudging PL_compling + force_normal_flags(). This seems a better fix than fudging PL_compiling */ SvREADONLY_off(((SVOP*)modname)->op_sv); modname->op_private |= OPpCONST_BARE; diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index d61493fac8..c4806274bc 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -303,7 +303,7 @@ PLy_traceback(PyObject *e, PyObject *v, PyObject *tb, long plain_lineno; /* - * The second frame points at the internal function, but to mimick + * The second frame points at the internal function, but to mimic * Python error reporting we want to say . */ if (*tb_depth == 1) diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c index 0cf2ad29cb..761534406d 100644 --- a/src/pl/plpython/plpy_plpymodule.c +++ b/src/pl/plpython/plpy_plpymodule.c @@ -463,7 +463,7 @@ PLy_output(volatile int level, PyObject *self, PyObject *args, PyObject *kw) if (strcmp(keyword, "message") == 0) { - /* the message should not be overwriten */ + /* the message should not be overwritten */ if (PyTuple_Size(args) != 0) { PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position"); diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h index 5f5c1ad5c6..e04722c47a 100644 --- a/src/pl/plpython/plpy_typeio.h +++ b/src/pl/plpython/plpy_typeio.h @@ -43,7 +43,7 @@ typedef union PLyTypeInput } PLyTypeInput; /* - * Conversion from Python object to a Postgresql Datum. + * Conversion from Python object to a PostgreSQL Datum. * * The 'inarray' argument to the conversion function is true, if the * converted value was in an array (Python list). 
It is used to give a @@ -78,7 +78,7 @@ typedef union PLyTypeOutput PLyObToTuple r; } PLyTypeOutput; -/* all we need to move Postgresql data to Python objects, +/* all we need to move PostgreSQL data to Python objects, * and vice versa */ typedef struct PLyTypeInfo diff --git a/src/test/isolation/specs/receipt-report.spec b/src/test/isolation/specs/receipt-report.spec index 1e214960d1..5e1d51d0bd 100644 --- a/src/test/isolation/specs/receipt-report.spec +++ b/src/test/isolation/specs/receipt-report.spec @@ -7,7 +7,7 @@ # be changed and a report of the closed day's receipts subsequently # run which will miss a receipt from the date which has been closed. # -# There are only six permuations which must cause a serialization failure. +# There are only six permutations which must cause a serialization failure. # Failure cases are where s1 overlaps both s2 and s3, but s2 commits before # s3 executes its first SELECT. # diff --git a/src/test/isolation/specs/two-ids.spec b/src/test/isolation/specs/two-ids.spec index d67064068e..277097125a 100644 --- a/src/test/isolation/specs/two-ids.spec +++ b/src/test/isolation/specs/two-ids.spec @@ -2,7 +2,7 @@ # # Small, simple test showing read-only anomalies. # -# There are only four permuations which must cause a serialization failure. +# There are only four permutations which must cause a serialization failure. # Required failure cases are where s2 overlaps both s1 and s3, but s1 # commits before s3 executes its first SELECT. # diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 76b9c2d372..d8e7b61294 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -310,7 +310,7 @@ INSERT INTO tmp3 values (5,50); -- Try (and fail) to add constraint due to invalid source columns ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; ERROR: column "c" referenced in foreign key constraint does not exist --- Try (and fail) to add constraint due to invalide destination columns explicitly given +-- Try (and fail) to add constraint due to invalid destination columns explicitly given ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; ERROR: column "b" referenced in foreign key constraint does not exist -- Try (and fail) to add constraint due to invalid data @@ -2842,7 +2842,7 @@ ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1" ALTER TABLE unlogged1 SET LOGGED; --- check relpersistence of an unlogged table after changing to permament +-- check relpersistence of an unlogged table after changing to permanent SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' UNION ALL SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1' @@ -3029,7 +3029,7 @@ ERROR: cannot alter type of column referenced in partition key expression -- cannot drop NOT NULL on columns in the range partition key ALTER TABLE partitioned ALTER COLUMN a DROP NOT NULL; ERROR: column "a" is in range partition key --- partitioned table cannot partiticipate in regular inheritance +-- partitioned table cannot participate in regular inheritance CREATE TABLE foo ( a int, b int diff --git 
a/src/test/regress/expected/create_table.out b/src/test/regress/expected/create_table.out index 36266f0a32..fc92cd92dd 100644 --- a/src/test/regress/expected/create_table.out +++ b/src/test/regress/expected/create_table.out @@ -424,7 +424,7 @@ DROP FUNCTION plusone(int); ERROR: cannot drop function plusone(integer) because other objects depend on it DETAIL: table partitioned depends on function plusone(integer) HINT: Use DROP ... CASCADE to drop the dependent objects too. --- partitioned table cannot partiticipate in regular inheritance +-- partitioned table cannot participate in regular inheritance CREATE TABLE partitioned2 ( a int ) PARTITION BY LIST ((a+1)); diff --git a/src/test/regress/expected/indirect_toast.out b/src/test/regress/expected/indirect_toast.out index 4f4bf41973..3e255fbded 100644 --- a/src/test/regress/expected/indirect_toast.out +++ b/src/test/regress/expected/indirect_toast.out @@ -23,7 +23,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); ("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) --- modification without modifying asigned value +-- modification without modifying assigned value UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -61,7 +61,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest; ("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) --- check we didn't screw with main/toast tuple visiblity +-- check we didn't screw with main/toast tuple visibility VACUUM FREEZE toasttest; SELECT substring(toasttest::text, 1, 200) FROM toasttest; substring @@ -95,7 +95,7 @@ UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); ("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 (4 rows) --- modification without modifying asigned value +-- modification without modifying assigned value UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); substring ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- @@ -135,7 +135,7 @@ SELECT substring(toasttest::text, 1, 200) FROM toasttest; ("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 (5 rows) --- check we didn't screw with main/toast tuple visiblity +-- check we didn't screw with main/toast tuple visibility VACUUM FREEZE toasttest; SELECT substring(toasttest::text, 1, 200) FROM toasttest; substring diff --git a/src/test/regress/expected/init_privs.out b/src/test/regress/expected/init_privs.out index 55139d4d37..292b1a1035 100644 --- a/src/test/regress/expected/init_privs.out +++ b/src/test/regress/expected/init_privs.out @@ 
-1,4 +1,4 @@ --- Test iniital privileges +-- Test initial privileges -- There should always be some initial privileges, set up by initdb SELECT count(*) > 0 FROM pg_init_privs; ?column? diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index 63859c53ac..8d005fddd4 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -291,7 +291,7 @@ insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), ke ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification drop index comp_key_index; -- --- Partial index tests, no inference predicate specificied +-- Partial index tests, no inference predicate specified -- create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5; create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5; diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index d9bbae097b..c3bb4fe767 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -4260,7 +4260,7 @@ select * from -- Test hints given on incorrect column references are useful -- select t1.uunique1 from - tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion ERROR: column t1.uunique1 does not exist LINE 1: select t1.uunique1 from ^ diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out index 4ae44607a4..d1f35c5833 100644 --- a/src/test/regress/expected/matview.out +++ b/src/test/regress/expected/matview.out @@ -292,7 +292,7 @@ SELECT * FROM mvtest_tvvm; -- test diemv when the mv does not exist DROP MATERIALIZED VIEW IF EXISTS no_such_mv; NOTICE: materialized view "no_such_mv" does not exist, skipping --- make sure invalid comination of options is prohibited +-- make sure invalid combination of options is prohibited REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA; ERROR: CONCURRENTLY and WITH NO DATA options cannot be used together -- no tuple locks on materialized views diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out index 79513e4598..04848c10a2 100644 --- a/src/test/regress/expected/plpgsql.out +++ b/src/test/regress/expected/plpgsql.out @@ -1423,7 +1423,7 @@ select * from WSlot order by slotname; -- -- Install the central phone system and create the phone numbers. --- They are weired on insert to the patchfields. Again the +-- They are wired on insert to the patchfields. Again the -- triggers automatically tell the PSlots to update their -- backlink field. 
-- diff --git a/src/test/regress/expected/replica_identity.out b/src/test/regress/expected/replica_identity.out index 1a04ec5561..fa63235fc9 100644 --- a/src/test/regress/expected/replica_identity.out +++ b/src/test/regress/expected/replica_identity.out @@ -98,7 +98,7 @@ Indexes: -- succeed, oid unique index ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx; --- succeed, nondeferrable unique constraint over nonullable cols +-- succeed, nondeferrable unique constraint over nonnullable cols ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer; -- succeed unique index over nonnullable cols ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key; diff --git a/src/test/regress/expected/rolenames.out b/src/test/regress/expected/rolenames.out index b8bf0cf877..fd058e4f7d 100644 --- a/src/test/regress/expected/rolenames.out +++ b/src/test/regress/expected/rolenames.out @@ -440,7 +440,7 @@ LINE 1: ALTER USER NONE SET application_name to 'BOMB'; ^ ALTER USER nonexistent SET application_name to 'BOMB'; -- error ERROR: role "nonexistent" does not exist --- CREAETE SCHEMA +-- CREATE SCHEMA set client_min_messages to error; CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER; CREATE SCHEMA newschema2 AUTHORIZATION "current_user"; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index de5ae00970..60731a99b7 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -938,7 +938,7 @@ CREATE TABLE shoe_data ( shoename char(10), -- primary key sh_avail integer, -- available # of pairs slcolor char(10), -- preferred shoelace color - slminlen float, -- miminum shoelace length + slminlen float, -- minimum shoelace length slmaxlen float, -- maximum shoelace length slunit char(8) -- length unit ); diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out index 8ed64d3c68..493a25587c 100644 --- a/src/test/regress/expected/tsdicts.out +++ b/src/test/regress/expected/tsdicts.out @@ -383,7 +383,7 @@ SELECT ts_lexize('hunspell_num', 'footballyklubber'); {foot,ball,klubber} (1 row) --- Synonim dictionary +-- Synonym dictionary CREATE TEXT SEARCH DICTIONARY synonym ( Template=synonym, Synonyms=synonym_sample diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 4611cbb731..1f551ec53c 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -255,7 +255,7 @@ INSERT INTO tmp3 values (5,50); -- Try (and fail) to add constraint due to invalid source columns ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full; --- Try (and fail) to add constraint due to invalide destination columns explicitly given +-- Try (and fail) to add constraint due to invalid destination columns explicitly given ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full; -- Try (and fail) to add constraint due to invalid data @@ -1829,7 +1829,7 @@ CREATE UNLOGGED TABLE unlogged3(f1 SERIAL PRIMARY KEY, f2 INTEGER REFERENCES unl ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists ALTER TABLE unlogged1 SET LOGGED; --- check relpersistence of an unlogged table after changing to permament +-- check relpersistence of an unlogged table after changing to permanent SELECT relname, 
relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1' UNION ALL SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1' @@ -1917,7 +1917,7 @@ ALTER TABLE partitioned ALTER COLUMN b TYPE char(5); -- cannot drop NOT NULL on columns in the range partition key ALTER TABLE partitioned ALTER COLUMN a DROP NOT NULL; --- partitioned table cannot partiticipate in regular inheritance +-- partitioned table cannot participate in regular inheritance CREATE TABLE foo ( a int, b int diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql index 6314aa403f..5f25c436ee 100644 --- a/src/test/regress/sql/create_table.sql +++ b/src/test/regress/sql/create_table.sql @@ -418,7 +418,7 @@ SELECT attname, attnotnull FROM pg_attribute -- prevent a function referenced in partition key from being dropped DROP FUNCTION plusone(int); --- partitioned table cannot partiticipate in regular inheritance +-- partitioned table cannot participate in regular inheritance CREATE TABLE partitioned2 ( a int ) PARTITION BY LIST ((a+1)); diff --git a/src/test/regress/sql/indirect_toast.sql b/src/test/regress/sql/indirect_toast.sql index d502480ad3..18b6cc3a95 100644 --- a/src/test/regress/sql/indirect_toast.sql +++ b/src/test/regress/sql/indirect_toast.sql @@ -11,7 +11,7 @@ SELECT descr, substring(make_tuple_indirect(toasttest)::text, 1, 200) FROM toast -- modification without changing varlenas UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); --- modification without modifying asigned value +-- modification without modifying assigned value UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); -- modification modifying, but effectively not changing @@ -20,7 +20,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = f1||'' RETURNING substring(toasttest::te UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200); SELECT substring(toasttest::text, 1, 200) FROM toasttest; --- check we didn't screw with main/toast tuple visiblity +-- check we didn't screw with main/toast tuple visibility VACUUM FREEZE toasttest; SELECT substring(toasttest::text, 1, 200) FROM toasttest; @@ -42,7 +42,7 @@ CREATE TRIGGER toasttest_update_indirect -- modification without changing varlenas UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200); --- modification without modifying asigned value +-- modification without modifying assigned value UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200); -- modification modifying, but effectively not changing @@ -53,7 +53,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toastte INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL); SELECT substring(toasttest::text, 1, 200) FROM toasttest; --- check we didn't screw with main/toast tuple visiblity +-- check we didn't screw with main/toast tuple visibility VACUUM FREEZE toasttest; SELECT substring(toasttest::text, 1, 200) FROM toasttest; diff --git a/src/test/regress/sql/init_privs.sql b/src/test/regress/sql/init_privs.sql index 9b4c70246e..4a31af2798 100644 --- a/src/test/regress/sql/init_privs.sql +++ b/src/test/regress/sql/init_privs.sql @@ -1,4 +1,4 @@ --- Test iniital privileges +-- Test initial privileges -- There should always be some initial privileges, set up by initdb SELECT count(*) > 0 FROM 
pg_init_privs; diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql index 116cf763f9..df3a9b59b5 100644 --- a/src/test/regress/sql/insert_conflict.sql +++ b/src/test/regress/sql/insert_conflict.sql @@ -138,7 +138,7 @@ insert into insertconflicttest values (12, 'Date') on conflict (lower(fruit), ke drop index comp_key_index; -- --- Partial index tests, no inference predicate specificied +-- Partial index tests, no inference predicate specified -- create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5; create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5; diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 97bccec721..bf18a8f6c4 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -1456,7 +1456,7 @@ select * from -- select t1.uunique1 from - tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn + tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion select t2.uunique1 from tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion select uunique1 from diff --git a/src/test/regress/sql/matview.sql b/src/test/regress/sql/matview.sql index 1164b4cea2..e0f4a1319f 100644 --- a/src/test/regress/sql/matview.sql +++ b/src/test/regress/sql/matview.sql @@ -92,7 +92,7 @@ SELECT * FROM mvtest_tvvm; -- test diemv when the mv does not exist DROP MATERIALIZED VIEW IF EXISTS no_such_mv; --- make sure invalid comination of options is prohibited +-- make sure invalid combination of options is prohibited REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA; -- no tuple locks on materialized views diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql index 877d3ad08e..31dcbdffdd 100644 --- a/src/test/regress/sql/plpgsql.sql +++ b/src/test/regress/sql/plpgsql.sql @@ -1350,7 +1350,7 @@ select * from WSlot order by slotname; -- -- Install the central phone system and create the phone numbers. --- They are weired on insert to the patchfields. Again the +-- They are wired on insert to the patchfields. Again the -- triggers automatically tell the PSlots to update their -- backlink field. 
-- diff --git a/src/test/regress/sql/replica_identity.sql b/src/test/regress/sql/replica_identity.sql index 68824a3aa7..3d2171c733 100644 --- a/src/test/regress/sql/replica_identity.sql +++ b/src/test/regress/sql/replica_identity.sql @@ -56,7 +56,7 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; -- succeed, oid unique index ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx; --- succeed, nondeferrable unique constraint over nonullable cols +-- succeed, nondeferrable unique constraint over nonnullable cols ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer; -- succeed unique index over nonnullable cols diff --git a/src/test/regress/sql/rolenames.sql b/src/test/regress/sql/rolenames.sql index 451d9d338d..4c5706bbaa 100644 --- a/src/test/regress/sql/rolenames.sql +++ b/src/test/regress/sql/rolenames.sql @@ -176,7 +176,7 @@ ALTER USER PUBLIC SET application_name to 'BOMB'; -- error ALTER USER NONE SET application_name to 'BOMB'; -- error ALTER USER nonexistent SET application_name to 'BOMB'; -- error --- CREAETE SCHEMA +-- CREATE SCHEMA set client_min_messages to error; CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER; CREATE SCHEMA newschema2 AUTHORIZATION "current_user"; diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql index 835945f4b7..90dc9ceaf4 100644 --- a/src/test/regress/sql/rules.sql +++ b/src/test/regress/sql/rules.sql @@ -522,7 +522,7 @@ CREATE TABLE shoe_data ( shoename char(10), -- primary key sh_avail integer, -- available # of pairs slcolor char(10), -- preferred shoelace color - slminlen float, -- miminum shoelace length + slminlen float, -- minimum shoelace length slmaxlen float, -- maximum shoelace length slunit char(8) -- length unit ); diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql index 4d0419e35a..ed2cbe1fec 100644 --- a/src/test/regress/sql/tsdicts.sql +++ b/src/test/regress/sql/tsdicts.sql @@ -96,7 +96,7 @@ SELECT ts_lexize('hunspell_num', 'footballklubber'); SELECT ts_lexize('hunspell_num', 'ballyklubber'); SELECT ts_lexize('hunspell_num', 'footballyklubber'); --- Synonim dictionary +-- Synonym dictionary CREATE TEXT SEARCH DICTIONARY synonym ( Template=synonym, Synonyms=synonym_sample diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm index 20eaf76bff..9441249b3a 100644 --- a/src/test/ssl/ServerSetup.pm +++ b/src/test/ssl/ServerSetup.pm @@ -7,7 +7,7 @@ # - ssl/root+client_ca.crt as the CA root for validating client certs. # - reject non-SSL connections # - a database called trustdb that lets anyone in -# - another database called certdb that uses certificate authentiction, ie. +# - another database called certdb that uses certificate authentication, ie. # the client must present a valid certificate signed by the client CA # - two users, called ssltestuser and anotheruser. # -- 2.40.0