test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
# When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $INSTALL in
*install-sh*) install_bin='';;
$as_echo "$MKDIR_P" >&6; }
# When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $MKDIR_P in
*install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
AC_PROG_INSTALL
# When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $INSTALL in
*install-sh*) install_bin='';;
AC_PROG_AWK
AC_PROG_MKDIR_P
# When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
# with our Makefile.global concept. This workaround helps.
case $MKDIR_P in
*install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
initBloomState(&state, index);
/*
- * Interate over the pages. We don't care about concurrently added pages,
+ * Iterate over the pages. We don't care about concurrently added pages,
* they can't contain tuples to delete.
*/
npages = RelationGetNumberOfBlocks(index);
5
(1 row)
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord
0
(1 row)
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord
5
(1 row)
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord
0
(1 row)
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord
5
(1 row)
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord
0
(1 row)
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord
5
(1 row)
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
cube_ll_coord
0
(1 row)
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
cube_ur_coord
SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
--
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
SELECT cube_ll_coord('(42,137)'::cube, 2);
SELECT cube_ll_coord('(42,137)'::cube, 3);
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
--
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
LANGUAGE SQL IMMUTABLE PARALLEL SAFE
AS 'SELECT ''6378168''::float8';
--- Astromers may want to change the earth function so that distances will be
+-- Astronomers may want to change the earth function so that distances will be
-- returned in degrees. To do this comment out the above definition and
-- uncomment the one below. Note that doing this will break the regression
-- tests.
* Product 9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
* 103 / 10 = 10 remainder 3
* Check digit 10 - 3 = 7
- * => 977-1144875-00-7 ?? <- suplemental number (number of the week, month, etc.)
+ * => 977-1144875-00-7 ?? <- supplemental number (number of the week, month, etc.)
* ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
*
* The hyphenation is always in after the four digits of the ISSN code.
* into bufO using the given hyphenation range TABLE.
* Assumes the input string to be used is of only digits.
*
- * Returns the number of characters acctually hyphenated.
+ * Returns the number of characters actually hyphenated.
*/
static unsigned
hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])
}
else if (*aux2 == '!' && *(aux2 + 1) == '\0')
{
- /* the invalid check digit sufix was found, set it */
+ /* the invalid check digit suffix was found, set it */
if (!magic)
valid = false;
magic = true;
t
(1 row)
---exractors
+--extractors
SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
?column?
----------
#define STACKDEPTH 32
/*
- * make polish notaion of query
+ * make Polish notation of query
*/
static int32
makepol(QPRS_STATE *state)
SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
---exractors
+--extractors
SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';
{
/*
* Once we have restored this file successfully we can remove some
- * prior WAL files. If this restore fails we musn't remove any
+ * prior WAL files. If this restore fails we mustn't remove any
* file because some of them will be requested again immediately
* after the failed restore, or when we restart recovery.
*/
{
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
- double min_time; /* minimim execution time in msec */
+ double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */
* ulen1: count of unique trigrams of array "trg1".
* len2: length of array "trg2" and array "trg2indexes".
* len: length of the array "found".
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
* text.
*
* Returns word similarity.
lastpos[trgindex] = i;
}
- /* Adjust lower bound if this trigram is present in required substing */
+ /* Adjust lower bound if this trigram is present in required substring */
if (found[trgindex])
{
int prev_lower,
*
* str1: search pattern string, of length slen1 bytes.
* str2: text in which we are looking for a word, of length slen2 bytes.
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
* text.
*
* Returns word similarity.
}
/*
- * caller wants exatly len bytes and dont bother with references
+ * caller wants exactly len bytes and we don't bother with references
*/
int
pullf_read_fixed(PullFilter *src, int len, uint8 *dst)
}
/*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
*
* It should be in the same range as p for signing (which
* is deprecated), but can be much smaller for encrypting.
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following
- * algorihm hovers 10-70 bits above gpg values. And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values. And for
+ * larger p, it uses gpg's algorithm.
*
* The point is - if k gets large, encryption will be
* really slow. It does not matter for decryption.
}
/*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
*
* It should be in the same range as p for signing (which
* is deprecated), but can be much smaller for encrypting.
* Until I research it further, I just mimic gpg behaviour.
* It has a special mapping table, for values <= 5120,
* above that it uses 'arbitrary high number'. Following
- * algorihm hovers 10-70 bits above gpg values. And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values. And for
+ * larger p, it uses gpg's algorithm.
*
* The point is - if k gets large, encryption will be
* really slow. It does not matter for decryption.
1
(10 rows)
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
-- push-down in some cases
-- unable to push {ft1, ft2}
EXPLAIN (VERBOSE, COSTS OFF)
SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
-- push-down in some cases
-- unable to push {ft1, ft2}
EXPLAIN (VERBOSE, COSTS OFF)
if (Abs(exp) <= 4)
{
/*
- * remove the decimal point from the mantyssa and write the digits
+ * remove the decimal point from the mantissa and write the digits
* to the buf array
*/
for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
* When we ask SELinux whether the required privileges are allowed or not,
* we use security_compute_av(3). It needs us to represent object classes
* and access vectors using 'external' codes defined in the security policy.
- * It is determinded in the runtime, not build time. So, it needs an internal
+ * It is determined at runtime, not build time. So, it needs an internal
* service to translate object class/access vectors which we want to check
* into the code which kernel want to be given.
*/
SELECT sepgsql_setcon(NULL); -- end of session
SELECT sepgsql_getcon();
--- the pooler cannot touch these tables directry
+-- the pooler cannot touch these tables directly
SELECT * FROM foo_tbl; -- failed
SELECT * FROM var_tbl; -- failed
/* internal error */
elog(ERROR, "check_primary_key: cannot process DELETE events");
- /* If UPDATion the must check new Tuple, not old one */
+ /* If UPDATE, then we must check new Tuple, not old one */
else
tuple = trigdata->tg_newtuple;
# modified by Ray Aspeitia 12-03-2003 :
# added log rotation script to db startup
# modified StartupParameters.plist "Provides" parameter to make it easier to
-# start and stop with the SystemStarter utitlity
+# start and stop with the SystemStarter utility
# use the below command in order to correctly start/stop/restart PG with log rotation script:
# SystemStarter [start|stop|restart] PostgreSQL
LANGUAGE INTERNAL
RETURNS NULL ON NULL INPUT;
---reset - just for debuging
+--reset - just for debugging
CREATE FUNCTION reset_tsearch()
RETURNS void
as 'MODULE_PATHNAME', 'tsa_reset_tsearch'
/*
* At the moment we assume that the returned attributes make sense for the
- * XPath specififed (i.e. we trust the caller). It's not fatal if they get
+ * XPath specified (i.e. we trust the caller). It's not fatal if they get
* it wrong - the input function for the column type will raise an error
* if the path result can't be converted into the correct binary
* representation.
$(CC) $(CFLAGS) -shared -static-libgcc -o $@ $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
endif
-endif # PORTNAME == cgywin
+endif # PORTNAME == cygwin
endif # PORTNAME == cygwin || PORTNAME == win32
The support for concurrency implemented in PostgreSQL was developed based on
the paper "Access Methods for Next-Generation Database Systems" by
-Marcel Kornaker:
+Marcel Kornacker:
http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
} RewriteMappingFile;
/*
- * A single In-Memeory logical rewrite mapping, hanging of
+ * A single In-Memory logical rewrite mapping, hanging off
* RewriteMappingFile->mappings.
*/
typedef struct RewriteMappingDataEntry
/*
* Activate this module whenever necessary.
- * This must happen during postmaster or standalong-backend startup,
+ * This must happen during postmaster or standalone-backend startup,
* or during WAL replay anytime the track_commit_timestamp setting is
* changed in the master.
*
* These shouldn't happen. TBLOCK_DEFAULT means the previous
* StartTransactionCommand didn't set the STARTED state
* appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
- * by EndParallelWorkerTranaction(), not this function.
+ * by EndParallelWorkerTransaction(), not this function.
*/
case TBLOCK_DEFAULT:
case TBLOCK_PARALLEL_INPROGRESS:
*
* Note: If the object is not found, we don't give any indication of the
* reason. (It might have been a missing schema if the name was qualified, or
- * an inexistant type name in case of a cast, function or operator; etc).
+ * a nonexistent type name in case of a cast, function or operator; etc).
* Currently there is only one caller that might be interested in such info, so
* we don't spend much effort here. If more callers start to care, it might be
* better to add some support for that in this function.
/*
- * CreateAcessMethod
+ * CreateAccessMethod
* Registers a new access method.
*/
ObjectAddress
/*
* Force synchronous commit, thus minimizing the window between
- * creation of the database files and commital of the transaction. If
+ * creation of the database files and committal of the transaction. If
* we crash before committing, we'll have a DB that's taking up disk
* space but is not in pg_database, which is not good.
*/
/*
* Force synchronous commit, thus minimizing the window between removal of
- * the database files and commital of the transaction. If we crash before
+ * the database files and committal of the transaction. If we crash before
* committing, we'll have a DB that's gone on disk but still there
* according to pg_database, which is not good.
*/
/*
* Force synchronous commit, thus minimizing the window between
- * copying the database files and commital of the transaction. If we
+ * copying the database files and committal of the transaction. If we
* crash before committing, we'll leave an orphaned set of files on
* disk, which is not fatal but not good either.
*/
}
/*
- * YAML is a superset of JSON; unfortuantely, the YAML quoting rules are
+ * YAML is a superset of JSON; unfortunately, the YAML quoting rules are
* ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
* http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
* Empty strings, strings with leading or trailing whitespace, and strings
}
else
{
- /* store SQL NULL instead of emtpy array */
+ /* store SQL NULL instead of empty array */
trftypes = NULL;
}
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cast will be ignored because the target data type is a domain")));
- /* Detemine the cast method */
+ /* Determine the cast method */
if (stmt->func != NULL)
castmethod = COERCION_METHOD_FUNCTION;
else if (stmt->inout)
* Errors arising from the attribute list still apply.
*
* Most column type changes that can skip a table rewrite do not invalidate
- * indexes. We ackowledge this when all operator classes, collations and
+ * indexes. We acknowledge this when all operator classes, collations and
* exclusion operators match. Though we could further permit intra-opfamily
* changes for btree and hash indexes, that adds subtle complexity with no
* concrete benefit for core types.
* indxpath.c could do something with. However, that seems overly
* restrictive. One useful application of partial indexes is to apply
* a UNIQUE constraint across a subset of a table, and in that scenario
- * any evaluatable predicate will work. So accept any predicate here
+ * any evaluable predicate will work. So accept any predicate here
* (except ones requiring a plan), and let indxpath.c fend for itself.
*/
static void
/*
* Check if ONLY was specified with ALTER TABLE. If so, allow the
- * contraint creation only if there are no children currently. Error out
+ * constraint creation only if there are no children currently. Error out
* otherwise.
*/
if (!recurse && children != NIL)
int plan_node_id = planstate->plan->plan_node_id;
MemoryContext oldcontext;
- /* Find the instumentation for this node. */
+ /* Find the instrumentation for this node. */
for (i = 0; i < instrumentation->num_plan_nodes; ++i)
if (instrumentation->plan_node_id[i] == plan_node_id)
break;
/*
* We must track the number of rows included in transValue, since to
- * remove the last input, advance_windowaggregate_base() musn't call the
+ * remove the last input, advance_windowaggregate_base() mustn't call the
* inverse transition function, but simply reset transValue back to its
* initial value.
*/
*
* NOTE: the IdentLine structs can contain pre-compiled regular expressions
* that live outside the memory context. Before destroying or resetting the
- * memory context, they need to be expliticly free'd.
+ * memory context, they need to be explicitly free'd.
*/
static List *parsed_ident_lines = NIL;
static MemoryContext parsed_ident_context = NULL;
for (index1 = 0; index1 < num_gene; index1++)
{
/*
- * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+ * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
* maps n back to 1
*/
/*
* give priority to candidates with fewest remaining unused edges;
* find out what the minimum number of unused edges is
- * (minimum_edges); if there is more than one cadidate with the
+ * (minimum_edges); if there is more than one candidate with the
* minimum number of unused edges keep count of this number
* (minimum_count);
*/
/*
* Insist that each side have a non-redundant eclass. This
* restriction is needed because various bits of the planner expect
- * that each clause in a merge be associatable with some pathkey in a
+ * that each clause in a merge be associable with some pathkey in a
* canonical pathkey list, but redundant eclasses can't appear in
* canonical sort orderings. (XXX it might be worth relaxing this,
* but not enough time to address it for 8.3.)
/*
* Now distribute "placeholders" to base rels as needed. This has to be
* done after join removal because removal could change whether a
- * placeholder is evaluatable at a base rel.
+ * placeholder is evaluable at a base rel.
*/
add_placeholders_to_base_rels(root);
* Detect whether there is a joinclause that involves
* the two given relations.
*
- * Note: the joinclause does not have to be evaluatable with only these two
+ * Note: the joinclause does not have to be evaluable with only these two
* relations. This is intentional. For example consider
* SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
* If a is much larger than the other tables, it may be worthwhile to
Relids currentrelids,
Relids current_and_outer)
{
- /* Clause must be evaluatable given available context */
+ /* Clause must be evaluable given available context */
if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
return false;
n->lateral = true;
n->subquery = $2;
n->alias = $3;
- /* same coment as above */
+ /* same comment as above */
if ($3 == NULL)
{
if (IsA($2, SelectStmt) &&
/* Flush any leaked data in the top-level context */
MemoryContextResetAndDeleteChildren(bgwriter_context);
- /* re-initilialize to avoid repeated errors causing problems */
+ /* re-initialize to avoid repeated errors causing problems */
WritebackContextInit(&wb_context, &bgwriter_flush_after);
/* Now we can allow interrupts again */
}
/*
- * Count up number of child processes of specified types (dead_end chidren
+ * Count up number of child processes of specified types (dead_end children
* are always excluded).
*/
static int
* Return the replication progress for origin setup in the current session.
*
* If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. This is useful if asynchronous
* commits are used when replaying replicated transactions.
*/
Datum
* Return the replication progress for an individual replication origin.
*
* If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. This is useful if asynchronous
* commits are used when replaying replicated transactions.
*/
Datum
*
* NB: Transactions handled here have to have actively aborted (i.e. have
* produced an abort record). Implicitly aborted transactions are handled via
- * ReorderBufferAbortOld(); transactions we're just not interesteded in, but
+ * ReorderBufferAbortOld(); transactions we're just not interested in, but
* which have committed are handled in ReorderBufferForget().
*
* This function purges this transaction and its contents from memory and
* toplevel xid.
*
* This is significantly different to ReorderBufferAbort() because
- * transactions that have committed need to be treated differenly from aborted
+ * transactions that have committed need to be treated differently from aborted
* ones since they may have modified the catalog.
*
* Note that this is only allowed to be called in the moment a transaction
/*
* ok, has to be a surviving logical slot, iterate and delete
- * everythign starting with xid-*
+ * everything starting with xid-*
*/
sprintf(path, "pg_replslot/%s", logical_de->d_name);
if (builder->snapshot == NULL)
{
builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
- /* inrease refcount for the snapshot builder */
+ /* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot);
}
if (builder->snapshot == NULL)
{
builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
- /* inrease refcount for the snapshot builder */
+ /* increase refcount for the snapshot builder */
SnapBuildSnapIncRefcount(builder->snapshot);
}
{
/*
* None of the originally running transaction is running anymore,
- * so our incrementaly built snapshot now is consistent.
+ * so our incrementally built snapshot now is consistent.
*/
ereport(LOG,
(errmsg("logical decoding found consistent point at %X/%X",
* reached. At most nevents occurred events are returned.
*
* If timeout = -1, block until an event occurs; if 0, check sockets for
- * readiness, but don't block; if > 0, block for at most timeout miliseconds.
+ * readiness, but don't block; if > 0, block for at most timeout milliseconds.
*
* Returns the number of events occurred, or 0 if the timeout was reached.
*
* it will point to a temporary buffer. This mostly avoids data copying in
* the hoped-for case where messages are short compared to the buffer size,
* while still allowing longer messages. In either case, the return value
- * remains valid until the next receive operation is perfomed on the queue.
+ * remains valid until the next receive operation is performed on the queue.
*
* When nowait = false, we'll wait on our process latch when the ring buffer
* is empty and we have not yet received a full message. The sender will
vxids = (VirtualTransactionId *)
palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
- /* Compute hash code and partiton lock, and look up conflicting modes. */
+ /* Compute hash code and partition lock, and look up conflicting modes. */
hashcode = LockTagHashCode(locktag);
partitionLock = LockHashPartitionLock(hashcode);
conflictMask = lockMethodTable->conflictTab[lockmode];
return false;
}
else
- return true; /* someobdy else has the lock */
+ return true; /* somebody else has the lock */
}
}
pg_unreachable();
* that happens before the list unlink happens, the list would end up
* being corrupted.
*
- * The barrier pairs with the LWLockWaitListLock() when enqueing for
+ * The barrier pairs with the LWLockWaitListLock() when enqueuing for
* another lock.
*/
pg_write_barrier();
/*
* Can't just remove ourselves from the list, but we need to iterate over
- * all entries as somebody else could have unqueued us.
+ * all entries as somebody else could have dequeued us.
*/
dlist_foreach_modify(iter, &lock->waiters)
{
/*
* We can't trust XactReadOnly here, because a transaction which started
* as READ WRITE can show as READ ONLY later, e.g., within
- * substransactions. We want to flag a transaction as READ ONLY if it
+ * subtransactions. We want to flag a transaction as READ ONLY if it
* commits without writing so that de facto READ ONLY transactions get the
* benefit of some RO optimizations, so we will use this local variable to
* get some cleanup logic right which is based on whether the transaction
* Spell field. The AffixData field is initialized if AF parameter is not
* defined.
* - NISortAffixes():
- * - builds a list of compond affixes from the affix list and stores it
+ * - builds a list of compound affixes from the affix list and stores it
* in the CompoundAffix.
* - builds prefix trees (Trie) from the affix list for prefixes and suffixes
* and stores them in Suffix and Prefix fields.
if (ld->curDictId == InvalidOid)
{
/*
- * usial mode: dictionary wants only one word, but we should keep in
+ * usual mode: dictionary wants only one word, but we should keep in
* mind that we should go through all stack
*/
/*
* We should be sure that current type of lexeme is recognized
- * by our dictinonary: we just check is it exist in list of
+ * by our dictionary: we just check is it exist in list of
* dictionaries ?
*/
for (i = 0; i < map->len && !dictExists; i++)
/* start of a new fragment */
infrag = 1;
numfragments++;
- /* add a fragment delimitor if this is after the first one */
+ /* add a fragment delimiter if this is after the first one */
if (numfragments > 1)
{
memcpy(ptr, prs->fragdelim, prs->fragdelimlen);
break;
}
if (curlen < min_words && i >= prs->curwords)
- { /* got end of text and our cover is shoter
+ { /* got end of text and our cover is shorter
* than min_words */
for (i = p - 1; i >= 0; i--)
{
for (last = 0, a = array; *a != NULL; a++)
{
- /* comperate first chars */
+ /* compare first chars */
if (*name != **a)
continue;
{
/*
* Lower bound no longer matters. Just estimate the fraction
- * with an upper bound <= const uppert bound
+ * with an upper bound <= const upper bound
*/
hist_selec =
calc_hist_selectivity_scalar(typcache, &const_upper,
}
/*
- * Append used transformated types to specified buffer
+ * Append used transformed types to specified buffer
*/
static void
print_function_trftypes(StringInfo buf, HeapTuple proctup)
/*
* if doc are big enough then ext.q may be equal to ext.p due to limit
- * of posional information. In this case we approximate number of
+ * of positional information. In this case we approximate number of
* noise word as half cover's length
*/
nNoise = (ext.q - ext.p) - (ext.end - ext.begin);
Wdoc += Cpos / ((double) (1 + nNoise));
CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
- if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent devision by
+ if (NExtent > 0 && CurExtPos > PrevExtPos /* prevent division by
* zero in a case of
multiple lexize */ )
SumDist += 1.0 / (CurExtPos - PrevExtPos);
/*
* lag_with_offset
- * returns the value of VE evelulated on a row that is OFFSET
+ * returns the value of VE evaluated on a row that is OFFSET
* rows before the current row within a partition,
* per spec.
*/
* points to the current file since the older file will be gone (or
* truncated). The new file will still contain older rows so lookups
* in them will work correctly. This wouldn't work correctly if
- * rewrites were allowed to change the schema in a noncompatible way,
+ * rewrites were allowed to change the schema in an incompatible way,
* but those are prevented both on catalog tables and on user tables
* declared as additional catalog tables.
*/
/*
* get_func_trftypes
*
- * Returns a number of transformated types used by function.
+ * Returns the number of transformed types used by the function.
*/
int
get_func_trftypes(HeapTuple procTup,
relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
- /* read all the settings under the same snapsot for efficiency */
+ /* read all the settings under the same snapshot for efficiency */
snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
/* Later settings are ignored if set earlier. */
ps_status.o rls.o sampling.o superuser.o timeout.o tzparser.o
# This location might depend on the installation directories. Therefore
-# we can't subsitute it into pg_config.h.
+# we can't substitute it into pg_config.h.
ifdef krb_srvtab
override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
endif
}
/*
- * check whether the transaciont id 'xid' is in the pre-sorted array 'xip'.
+ * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
*/
static bool
TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
*
* Optional.
*
- * Set up extrac format-related TOC data.
+ * Set up extra format-related TOC data.
*/
static void
_ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
char *varname;
char *value;
- /* concate prefix and column name */
+ /* concatenate prefix and column name */
varname = psprintf("%s%s", pset.gset_prefix, colname);
if (!PQgetisnull(result, 0, i))
printTableAddFooter(&cont, _("Check constraints:"));
for (i = 0; i < tuples; i++)
{
- /* untranslated contraint name and def */
+ /* untranslated constraint name and def */
printfPQExpBuffer(&buf, " \"%s\" %s",
PQgetvalue(result, i, 0),
PQgetvalue(result, i, 1));
if (verbose)
{
/*
- * As of PostgreSQL 9.0, use pg_table_size() to show a more acurate
+ * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
* size of a table, including FSM, VM and TOAST tables.
*/
if (pset.sversion >= 90000)
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN 0x02
#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid
- * visiblitymap flags bits */
+ * visibilitymap flags bits */
/* Macros for visibilitymap test */
#define VM_ALL_VISIBLE(r, b, v) \
* apply */
} SyncCommitLevel;
-/* Define the default setting for synchonous_commit */
+/* Define the default setting for synchronous_commit */
#define SYNCHRONOUS_COMMIT_ON SYNCHRONOUS_COMMIT_REMOTE_FLUSH
/* Synchronous commit level */
/* gettext domain name mangling */
/*
- * To better support parallel installations of major PostgeSQL
+ * To better support parallel installations of major PostgreSQL
* versions as well as parallel installations of major library soname
* versions, we mangle the gettext domain name by appending those
* version numbers. The coding rule ought to be that wherever the
#define SPIN_DELAY() spin_delay()
/* If using Visual C++ on Win64, inline assembly is unavailable.
- * Use a _mm_pause instrinsic instead of rep nop.
+ * Use a _mm_pause intrinsic instead of rep nop.
*/
#if defined(_WIN64)
static __forceinline void
} CMPDAffix;
/*
- * Type of encoding affix flags in Hunspel dictionaries
+ * Type of encoding affix flags in Hunspell dictionaries
*/
typedef enum
{
/*
* The aim is to get a simpler interface to the database routines.
- * All the tidieous messing around with tuples is supposed to be hidden
+ * All the tedious messing around with tuples is supposed to be hidden
* by this function.
*/
/* Author: Linus Tolke
*
* function works as follows:
* - first we analyze the parameters
- * - if this is a special case with no delimiters, add delimters
+ * - if this is a special case with no delimiters, add delimiters
* - find the tokens. First we look for numerical values. If we have found
* less than 3 tokens, we check for the months' names and thereafter for
* the abbreviations of the months' names.
{
/* use cmp_abs function to calculate the result */
- /* both are positive: normal comparation with cmp_abs */
+ /* both are positive: normal comparison with cmp_abs */
if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
return cmp_abs(var1, var2);
- /* both are negative: return the inverse of the normal comparation */
+ /* both are negative: return the inverse of the normal comparison */
if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
{
/*
/* In case we have a struct, we have to print as many "?" as there are attributes in the struct
* An array is only allowed together with an element argument
- * This is essantially only used for inserts, but using a struct as input parameter is an error anywhere else
+ * This is essentially only used for inserts, but using a struct as input parameter is an error anywhere else
* so we don't have to worry here. */
if (p->type->type == ECPGt_struct || (array && p->type->type == ECPGt_array && p->type->u.element->type == ECPGt_struct))
$$ = $3;
};
/*
- * variable decalartion outside exec sql declare block
+ * variable declaration outside exec sql declare block
*/
ECPGVarDeclaration: single_vt_declaration;
free(forward_name);
forward_name = NULL;
- /* This is essantially a typedef but needs the keyword struct/union as well.
+ /* This is essentially a typedef but needs the keyword struct/union as well.
* So we create the typedef for each struct definition with symbol */
for (ptr = types; ptr != NULL; ptr = ptr->next)
{
;
/*
- * set/reset the automatic transaction mode, this needs a differnet handling
+ * set/reset the automatic transaction mode, this needs a different handling
* as the other set commands
*/
ECPGSetAutocommit: SET SQL_AUTOCOMMIT '=' on_off { $$ = $4; }
;
/*
- * set the actual connection, this needs a differnet handling as the other
+ * set the actual connection, this needs a different handling as the other
* set commands
*/
ECPGSetConnection: SET CONNECTION TO connection_object { $$ = $4; }
if ($len == 1)
{
- # Straight assignement
+ # Straight assignment
$str = ' $$ = ' . $flds_new[0] . ';';
add_to_buffer('rules', $str);
}
* be sent in cleartext if it is encrypted on the client side. This is
* good because it ensures the cleartext password won't end up in logs,
* pg_stat displays, etc. We export the function so that clients won't
- * be dependent on low-level details like whether the enceyption is MD5
+ * be dependent on low-level details like whether the encryption is MD5
* or something else.
*
* Arguments are the cleartext password, and the SQL name of the user it
#endif
/*
- * The SSL implementatation provides these functions (fe-secure-openssl.c)
+ * The SSL implementation provides these functions (fe-secure-openssl.c)
*/
extern void pgtls_init_library(bool do_ssl, int do_crypto);
extern int pgtls_init(PGconn *conn);
#include "win32.h"
-/* Declared here to avoid pulling in all includes, which causes name collissions */
+/* Declared here to avoid pulling in all includes, which causes name collisions */
#ifdef ENABLE_NLS
extern char *libpq_gettext(const char *msgid) pg_attribute_format_arg(1);
#else
If this option is given, a copy of each file will be saved with
the given suffix that contains the suggested changes. This does
not require any external programs. Note that this does not
-automagially add a dot between the original filename and the
+automagically add a dot between the original filename and the
suffix. If you want the dot, you have to include it in the option
argument.
OP * const modname = newSVOP(OP_CONST, 0, name);
/* 5.005 has a somewhat hacky force_normal that doesn't croak on
- SvREADONLY() if PL_compling is true. Current perls take care in
+ SvREADONLY() if PL_compiling is true. Current perls take care in
ck_require() to correctly turn off SvREADONLY before calling
- force_normal_flags(). This seems a better fix than fudging PL_compling
+ force_normal_flags(). This seems a better fix than fudging PL_compiling
*/
SvREADONLY_off(((SVOP*)modname)->op_sv);
modname->op_private |= OPpCONST_BARE;
long plain_lineno;
/*
- * The second frame points at the internal function, but to mimick
+ * The second frame points at the internal function, but to mimic
* Python error reporting we want to say <module>.
*/
if (*tb_depth == 1)
if (strcmp(keyword, "message") == 0)
{
- /* the message should not be overwriten */
+ /* the message should not be overwritten */
if (PyTuple_Size(args) != 0)
{
PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position");
PLyObToTuple r;
} PLyTypeOutput;
-/* all we need to move Postgresql data to Python objects,
+/* all we need to move PostgreSQL data to Python objects,
* and vice versa
*/
typedef struct PLyTypeInfo
# be changed and a report of the closed day's receipts subsequently
# run which will miss a receipt from the date which has been closed.
#
-# There are only six permuations which must cause a serialization failure.
+# There are only six permutations which must cause a serialization failure.
# Failure cases are where s1 overlaps both s2 and s3, but s2 commits before
# s3 executes its first SELECT.
#
#
# Small, simple test showing read-only anomalies.
#
-# There are only four permuations which must cause a serialization failure.
+# There are only four permutations which must cause a serialization failure.
# Required failure cases are where s2 overlaps both s1 and s3, but s1
# commits before s3 executes its first SELECT.
#
-- Try (and fail) to add constraint due to invalid source columns
ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
ERROR: column "c" referenced in foreign key constraint does not exist
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
ERROR: column "b" referenced in foreign key constraint does not exist
-- Try (and fail) to add constraint due to invalid data
ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
ERROR: could not change table "unlogged2" to logged because it references unlogged table "unlogged1"
ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
UNION ALL
SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
("one-toasted,one-null",1,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
(4 rows)
--- modification without modifying asigned value
+-- modification without modifying assigned value
UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
substring
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
("one-toasted,one-null",4,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
(4 rows)
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
VACUUM FREEZE toasttest;
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
substring
("one-toasted,one-null",5,,12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123
(4 rows)
--- modification without modifying asigned value
+-- modification without modifying assigned value
UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
substring
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
("one-toasted,one-null, via indirect",0,1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890
(5 rows)
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
VACUUM FREEZE toasttest;
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
substring
--- Test iniital privileges
+-- Test initial privileges
-- There should always be some initial privileges, set up by initdb
SELECT count(*) > 0 FROM pg_init_privs;
?column?
ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification
drop index comp_key_index;
--
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
--
create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
-- Test hints given on incorrect column references are useful
--
select t1.uunique1 from
- tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
ERROR: column t1.uunique1 does not exist
LINE 1: select t1.uunique1 from
^
-- test diemv when the mv does not exist
DROP MATERIALIZED VIEW IF EXISTS no_such_mv;
NOTICE: materialized view "no_such_mv" does not exist, skipping
--- make sure invalid comination of options is prohibited
+-- make sure invalid combination of options is prohibited
REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA;
ERROR: CONCURRENTLY and WITH NO DATA options cannot be used together
-- no tuple locks on materialized views
--
-- Install the central phone system and create the phone numbers.
--- They are weired on insert to the patchfields. Again the
+-- They are wired on insert to the patchfields. Again the
-- triggers automatically tell the PSlots to update their
-- backlink field.
--
-- succeed, oid unique index
ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx;
--- succeed, nondeferrable unique constraint over nonullable cols
+-- succeed, nondeferrable unique constraint over nonnullable cols
ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer;
-- succeed unique index over nonnullable cols
ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key;
^
ALTER USER nonexistent SET application_name to 'BOMB'; -- error
ERROR: role "nonexistent" does not exist
--- CREAETE SCHEMA
+-- CREATE SCHEMA
set client_min_messages to error;
CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
shoename char(10), -- primary key
sh_avail integer, -- available # of pairs
slcolor char(10), -- preferred shoelace color
- slminlen float, -- miminum shoelace length
+ slminlen float, -- minimum shoelace length
slmaxlen float, -- maximum shoelace length
slunit char(8) -- length unit
);
{foot,ball,klubber}
(1 row)
--- Synonim dictionary
+-- Synonym dictionary
CREATE TEXT SEARCH DICTIONARY synonym (
Template=synonym,
Synonyms=synonym_sample
-- Try (and fail) to add constraint due to invalid source columns
ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
-- Try (and fail) to add constraint due to invalid data
ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
UNION ALL
SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
-- modification without changing varlenas
UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
--- modification without modifying asigned value
+-- modification without modifying assigned value
UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
-- modification modifying, but effectively not changing
UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200);
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
VACUUM FREEZE toasttest;
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
-- modification without changing varlenas
UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
--- modification without modifying asigned value
+-- modification without modifying assigned value
UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
-- modification modifying, but effectively not changing
INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL);
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
VACUUM FREEZE toasttest;
SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- Test iniital privileges
+-- Test initial privileges
-- There should always be some initial privileges, set up by initdb
SELECT count(*) > 0 FROM pg_init_privs;
drop index comp_key_index;
--
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
--
create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
--
select t1.uunique1 from
- tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn
+ tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
select t2.uunique1 from
tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion
select uunique1 from
-- test diemv when the mv does not exist
DROP MATERIALIZED VIEW IF EXISTS no_such_mv;
--- make sure invalid comination of options is prohibited
+-- make sure invalid combination of options is prohibited
REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA;
-- no tuple locks on materialized views
--
-- Install the central phone system and create the phone numbers.
--- They are weired on insert to the patchfields. Again the
+-- They are wired on insert to the patchfields. Again the
-- triggers automatically tell the PSlots to update their
-- backlink field.
--
-- succeed, oid unique index
ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx;
--- succeed, nondeferrable unique constraint over nonullable cols
+-- succeed, nondeferrable unique constraint over nonnullable cols
ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer;
-- succeed unique index over nonnullable cols
ALTER USER NONE SET application_name to 'BOMB'; -- error
ALTER USER nonexistent SET application_name to 'BOMB'; -- error
--- CREAETE SCHEMA
+-- CREATE SCHEMA
set client_min_messages to error;
CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
shoename char(10), -- primary key
sh_avail integer, -- available # of pairs
slcolor char(10), -- preferred shoelace color
- slminlen float, -- miminum shoelace length
+ slminlen float, -- minimum shoelace length
slmaxlen float, -- maximum shoelace length
slunit char(8) -- length unit
);
SELECT ts_lexize('hunspell_num', 'ballyklubber');
SELECT ts_lexize('hunspell_num', 'footballyklubber');
--- Synonim dictionary
+-- Synonym dictionary
CREATE TEXT SEARCH DICTIONARY synonym (
Template=synonym,
Synonyms=synonym_sample
# - ssl/root+client_ca.crt as the CA root for validating client certs.
# - reject non-SSL connections
# - a database called trustdb that lets anyone in
-# - another database called certdb that uses certificate authentiction, ie.
+# - another database called certdb that uses certificate authentication, ie.
# the client must present a valid certificate signed by the client CA
# - two users, called ssltestuser and anotheruser.
#