From f71145d0ec163439805d0433acb2e44a782e01f7 Mon Sep 17 00:00:00 2001 From: Peter Eisentraut Date: Tue, 17 Aug 2010 04:37:19 +0000 Subject: [PATCH] Spell and markup checking --- doc/src/sgml/arch-dev.sgml | 4 +- doc/src/sgml/bki.sgml | 4 +- doc/src/sgml/btree-gin.sgml | 12 +- doc/src/sgml/btree-gist.sgml | 8 +- doc/src/sgml/catalogs.sgml | 266 +++++++++++++------------- doc/src/sgml/chkpass.sgml | 6 +- doc/src/sgml/citext.sgml | 10 +- doc/src/sgml/client-auth.sgml | 66 +++---- doc/src/sgml/config.sgml | 23 ++- doc/src/sgml/contrib-spi.sgml | 6 +- doc/src/sgml/cube.sgml | 10 +- doc/src/sgml/cvs.sgml | 32 ++-- doc/src/sgml/datatype.sgml | 4 +- doc/src/sgml/dblink.sgml | 8 +- doc/src/sgml/diskusage.sgml | 4 +- doc/src/sgml/docguide.sgml | 6 +- doc/src/sgml/earthdistance.sgml | 6 +- doc/src/sgml/ecpg.sgml | 106 +++++----- doc/src/sgml/external-projects.sgml | 4 +- doc/src/sgml/func.sgml | 16 +- doc/src/sgml/fuzzystrmatch.sgml | 4 +- doc/src/sgml/gin.sgml | 41 ++-- doc/src/sgml/gist.sgml | 24 +-- doc/src/sgml/high-availability.sgml | 16 +- doc/src/sgml/hstore.sgml | 6 +- doc/src/sgml/indexam.sgml | 44 ++--- doc/src/sgml/indices.sgml | 8 +- doc/src/sgml/installation.sgml | 8 +- doc/src/sgml/intarray.sgml | 4 +- doc/src/sgml/isn.sgml | 6 +- doc/src/sgml/libpq.sgml | 96 +++++----- doc/src/sgml/lo.sgml | 4 +- doc/src/sgml/ltree.sgml | 4 +- doc/src/sgml/monitoring.sgml | 4 +- doc/src/sgml/oid2name.sgml | 8 +- doc/src/sgml/pageinspect.sgml | 10 +- doc/src/sgml/pgarchivecleanup.sgml | 6 +- doc/src/sgml/pgbench.sgml | 12 +- doc/src/sgml/pgcrypto.sgml | 14 +- doc/src/sgml/pgfreespacemap.sgml | 5 +- doc/src/sgml/pgstandby.sgml | 6 +- doc/src/sgml/pgstattuple.sgml | 6 +- doc/src/sgml/pgtrgm.sgml | 4 +- doc/src/sgml/pgupgrade.sgml | 276 +++++++++++++-------------- doc/src/sgml/planstats.sgml | 4 +- doc/src/sgml/plperl.sgml | 6 +- doc/src/sgml/plpgsql.sgml | 4 +- doc/src/sgml/ref/alter_opfamily.sgml | 4 +- doc/src/sgml/ref/alter_role.sgml | 4 +- doc/src/sgml/ref/create_opclass.sgml 
| 4 +- doc/src/sgml/ref/create_table.sgml | 4 +- doc/src/sgml/ref/create_trigger.sgml | 4 +- doc/src/sgml/ref/pg_ctl-ref.sgml | 8 +- doc/src/sgml/ref/pg_dumpall.sgml | 6 +- doc/src/sgml/ref/pg_restore.sgml | 4 +- doc/src/sgml/ref/psql-ref.sgml | 10 +- doc/src/sgml/regress.sgml | 6 +- doc/src/sgml/release-9.0.sgml | 80 ++++---- doc/src/sgml/release.sgml | 4 +- doc/src/sgml/seg.sgml | 6 +- doc/src/sgml/spi.sgml | 10 +- doc/src/sgml/storage.sgml | 4 +- doc/src/sgml/tablefunc.sgml | 10 +- doc/src/sgml/trigger.sgml | 4 +- doc/src/sgml/unaccent.sgml | 2 +- doc/src/sgml/vacuumlo.sgml | 4 +- doc/src/sgml/wal.sgml | 4 +- doc/src/sgml/xindex.sgml | 24 ++- doc/src/sgml/xml2.sgml | 16 +- doc/src/sgml/xoper.sgml | 6 +- 70 files changed, 728 insertions(+), 731 deletions(-) diff --git a/doc/src/sgml/arch-dev.sgml b/doc/src/sgml/arch-dev.sgml index 888f6c2a50..b656f9b9e4 100644 --- a/doc/src/sgml/arch-dev.sgml +++ b/doc/src/sgml/arch-dev.sgml @@ -1,4 +1,4 @@ - + Overview of PostgreSQL Internals @@ -463,7 +463,7 @@ needed, plus any auxiliary steps needed, such as sort nodes or aggregate-function calculation nodes. Most of these plan node types have the additional ability to do selection - (discarding rows that do not meet a specified boolean condition) + (discarding rows that do not meet a specified Boolean condition) and projection (computation of a derived column set based on given column values, that is, evaluation of scalar expressions where needed). One of the responsibilities of the diff --git a/doc/src/sgml/bki.sgml b/doc/src/sgml/bki.sgml index d747f9244e..3f382ea017 100644 --- a/doc/src/sgml/bki.sgml +++ b/doc/src/sgml/bki.sgml @@ -1,4 +1,4 @@ - + <acronym>BKI</acronym> Backend Interface @@ -122,7 +122,7 @@ The table is created as shared if shared_relation is specified. It will have OIDs unless without_oids is specified. 
- The table's rowtype OID (pg_type OID) can optionally + The table's row type OID (pg_type OID) can optionally be specified via the rowtype_oid clause; if not specified, an OID is automatically generated for it. (The rowtype_oid clause is useless if bootstrap is specified, but it can be diff --git a/doc/src/sgml/btree-gin.sgml b/doc/src/sgml/btree-gin.sgml index d2142028eb..eb111bb827 100644 --- a/doc/src/sgml/btree-gin.sgml +++ b/doc/src/sgml/btree-gin.sgml @@ -1,4 +1,4 @@ - + btree_gin @@ -9,7 +9,7 @@ btree_gin provides sample GIN operator classes that - implement B-Tree equivalent behavior for the data types + implement B-tree equivalent behavior for the data types int2, int4, int8, float4, float8, timestamp with time zone, timestamp without time zone, time with time zone, @@ -21,12 +21,12 @@ In general, these operator classes will not outperform the equivalent - standard btree index methods, and they lack one major feature of the - standard btree code: the ability to enforce uniqueness. However, + standard B-tree index methods, and they lack one major feature of the + standard B-tree code: the ability to enforce uniqueness. However, they are useful for GIN testing and as a base for developing other GIN operator classes. Also, for queries that test both a GIN-indexable - column and a btree-indexable column, it might be more efficient to create - a multi-column GIN index that uses one of these opclasses than to create + column and a B-tree-indexable column, it might be more efficient to create + a multicolumn GIN index that uses one of these operator classes than to create two separate indexes that would have to be combined via bitmap ANDing. 
diff --git a/doc/src/sgml/btree-gist.sgml b/doc/src/sgml/btree-gist.sgml index 8264d5a219..1d0a1e8dfe 100644 --- a/doc/src/sgml/btree-gist.sgml +++ b/doc/src/sgml/btree-gist.sgml @@ -1,4 +1,4 @@ - + btree_gist @@ -9,7 +9,7 @@ btree_gist provides sample GiST operator classes that - implement B-Tree equivalent behavior for the data types + implement B-tree equivalent behavior for the data types int2, int4, int8, float4, float8, numeric, timestamp with time zone, timestamp without time zone, time with time zone, @@ -21,8 +21,8 @@ In general, these operator classes will not outperform the equivalent - standard btree index methods, and they lack one major feature of the - standard btree code: the ability to enforce uniqueness. However, + standard B-tree index methods, and they lack one major feature of the + standard B-tree code: the ability to enforce uniqueness. However, they are useful for GiST testing and as a base for developing other GiST operator classes. diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 4ab833133c..4732481e27 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -1,4 +1,4 @@ - + @@ -344,8 +344,8 @@ The initial value of the transition state. This is a text field containing the initial value in its external string - representation. If this field is NULL, the transition state - value starts out NULL + representation. If this field is null, the transition state + value starts out null. @@ -461,7 +461,7 @@ amsearchnulls bool - Does the access method support IS NULL/NOT NULL searches? + Does the access method support IS NULL/NOT NULL searches? @@ -892,7 +892,7 @@ The exact meaning of positive values is data type-dependent. For scalar data types, attstattarget is both the target number of most common values - to collect, and the target number of histogram bins to create + to collect, and the target number of histogram bins to create. @@ -913,7 +913,7 @@ The number of the column. 
Ordinary columns are numbered from 1 up. System columns, such as oid, - have (arbitrary) negative numbers + have (arbitrary) negative numbers. @@ -924,7 +924,7 @@ Number of dimensions, if the column is an array type; otherwise 0. (Presently, the number of dimensions of an array is not enforced, - so any nonzero value effectively means it's an array) + so any nonzero value effectively means it's an array.) @@ -948,7 +948,7 @@ supplied at table creation time (for example, the maximum length of a varchar column). It is passed to type-specific input functions and length coercion functions. - The value will generally be -1 for types that do not need atttypmod + The value will generally be -1 for types that do not need atttypmod. @@ -968,7 +968,7 @@ Normally a copy of pg_type.typstorage of this column's type. For TOAST-able data types, this can be altered - after column creation to control storage policy + after column creation to control storage policy. @@ -987,7 +987,7 @@ This represents a not-null constraint. It is possible to - change this column to enable or disable the constraint + change this column to enable or disable the constraint. @@ -998,7 +998,7 @@ This column has a default value, in which case there will be a corresponding entry in the pg_attrdef - catalog that actually defines the value + catalog that actually defines the value. @@ -1009,7 +1009,7 @@ This column has been dropped and is no longer valid. A dropped column is still physically present in the table, but is - ignored by the parser and so cannot be accessed via SQL + ignored by the parser and so cannot be accessed via SQL. @@ -1019,7 +1019,7 @@ This column is defined locally in the relation. Note that a column can - be locally defined and inherited simultaneously + be locally defined and inherited simultaneously. @@ -1029,7 +1029,7 @@ The number of direct ancestors this column has. 
A column with a - nonzero number of ancestors cannot be dropped nor renamed + nonzero number of ancestors cannot be dropped nor renamed. @@ -1163,7 +1163,7 @@ bool Role can log in. That is, this role can be given as the initial - session authorization identifier + session authorization identifier. @@ -1172,21 +1172,21 @@ int4 For roles that can log in, this sets maximum number of concurrent - connections this role can make. -1 means no limit + connections this role can make. -1 means no limit. rolpassword text - Password (possibly encrypted); NULL if none + Password (possibly encrypted); null if none rolvaliduntil timestamptz Password expiry time (only used for password authentication); - NULL if no expiration + null if no expiration @@ -1338,7 +1338,7 @@ a means implicitly in assignment to a target column, as well as explicitly. i means implicitly in expressions, as well as the - other cases + other cases. @@ -1349,7 +1349,7 @@ Indicates how the cast is performed. f means that the function specified in the castfunc field is used. i means that the input/output functions are used. - b means that the types are binary-coercible, thus no conversion is required + b means that the types are binary-coercible, thus no conversion is required. @@ -1487,7 +1487,7 @@ BLCKSZ). This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as - CREATE INDEX + CREATE INDEX. @@ -1499,7 +1499,7 @@ Number of rows in the table. This is only an estimate used by the planner. It is updated by VACUUM, ANALYZE, and a few DDL commands such as - CREATE INDEX + CREATE INDEX. @@ -1510,7 +1510,7 @@ OID of the TOAST table associated with this table, 0 if none. The TOAST table stores large attributes out of line in a - secondary table + secondary table. @@ -1519,7 +1519,7 @@ oid pg_class.oid - For a TOAST table, the OID of its index. 0 if not a TOAST table + For a TOAST table, the OID of its index. 0 if not a TOAST table. 
@@ -1539,7 +1539,7 @@ True if this table is shared across all databases in the cluster. Only certain system catalogs (such as pg_database) - are shared + are shared. @@ -1549,7 +1549,7 @@ True if this table is a temporary relation. If so, only the creating - session can safely access its contents + session can safely access its contents. @@ -1573,7 +1573,7 @@ Number of user columns in the relation (system columns not counted). There must be this many corresponding entries in pg_attribute. See also - pg_attribute.attnum + pg_attribute.attnum. @@ -1652,7 +1652,7 @@ (frozen) transaction ID in this table. This is used to track whether the table needs to be vacuumed in order to prevent transaction ID wraparound or to allow pg_clog to be shrunk. Zero - (InvalidTransactionId) if the relation is not a table + (InvalidTransactionId) if the relation is not a table. @@ -1681,7 +1681,7 @@ - Several of the boolean flags in pg_class are maintained + Several of the Boolean flags in pg_class are maintained lazily: they are guaranteed to be true if that's the correct state, but may not be reset to false immediately when the condition is no longer true. For example, relhasindex is set by @@ -1847,7 +1847,7 @@ This constraint is defined locally for the relation. Note that a - constraint can be locally defined and inherited simultaneously + constraint can be locally defined and inherited simultaneously. @@ -1858,7 +1858,7 @@ The number of direct inheritance ancestors this constraint has. A constraint with - a nonzero number of ancestors cannot be dropped nor renamed + a nonzero number of ancestors cannot be dropped nor renamed. @@ -2128,7 +2128,7 @@ If false then no one can connect to this database. This is - used to protect the template0 database from being altered + used to protect the template0 database from being altered. @@ -2138,7 +2138,7 @@ Sets maximum number of concurrent connections that can be made - to this database. -1 means no limit + to this database. -1 means no limit. 
@@ -2162,7 +2162,7 @@ track whether the database needs to be vacuumed in order to prevent transaction ID wraparound or to allow pg_clog to be shrunk. It is the minimum of the per-table - pg_class.relfrozenxid values + pg_class.relfrozenxid values. @@ -2175,7 +2175,7 @@ Within this database, all tables for which pg_class.reltablespace is zero will be stored in this tablespace; in particular, all the non-shared - system catalogs will be there + system catalogs will be there. @@ -2273,7 +2273,7 @@ - Note that when an ACL entry in another catalog is NULL, it is taken + Note that when an ACL entry in another catalog is null, it is taken to represent the hard-wired default privileges for its object, not whatever might be in pg_default_acl at the moment. pg_default_acl is only consulted during @@ -2340,7 +2340,7 @@ For a table column, this is the column number (the objid and classid refer to the table itself). For all other object types, this column is - zero + zero. @@ -2366,7 +2366,7 @@ For a table column, this is the column number (the refobjid and refclassid refer to the table itself). For all other object types, this column - is zero + is zero. @@ -2515,7 +2515,7 @@ For a comment on a table column, this is the column number (the objoid and classoid refer to the table itself). For all other object types, this column is - zero + zero. @@ -2743,7 +2743,7 @@ text[] - Foreign server specific options, as keyword=value strings. + Foreign server specific options, as keyword=value strings @@ -2840,7 +2840,7 @@ index is possibly incomplete: it must still be modified by INSERT/UPDATE operations, but it cannot safely be used for queries. If it is unique, the uniqueness property is not - true either + true either. 
@@ -2850,7 +2850,7 @@ If true, queries must not use the index until the xmin - of this pg_index row is below their TransactionXmin + of this pg_index row is below their TransactionXmin event horizon, because the table may contain broken HOT chains with incompatible rows that they can see @@ -2863,7 +2863,7 @@ If true, the index is currently ready for inserts. False means the index must be ignored by INSERT/UPDATE - operations + operations. @@ -2877,7 +2877,7 @@ of 1 3 would mean that the first and the third table columns make up the index key. A zero in this array indicates that the corresponding index attribute is an expression over the table columns, - rather than a simple column reference + rather than a simple column reference. @@ -2888,7 +2888,7 @@ For each column in the index key, this contains the OID of the operator class to use. See - pg_opclass for details + pg_opclass for details. @@ -2899,7 +2899,7 @@ This is an array of indnatts values that store per-column flag bits. The meaning of the bits is defined by - the index's access method + the index's access method. @@ -2907,18 +2907,24 @@ indexprs text - Expression trees (in nodeToString() representation) - for index attributes that are not simple column references. This is a - list with one element for each zero entry in indkey. - NULL if all index attributes are simple references + + Expression trees (in nodeToString() + representation) for index attributes that are not simple column + references. This is a list with one element for each zero + entry in indkey. Null if all index attributes + are simple references. + indpred text - Expression tree (in nodeToString() representation) - for partial index predicate. NULL if not a partial index + + Expression tree (in nodeToString() + representation) for partial index predicate. Null if not a + partial index. 
+ @@ -2980,7 +2986,7 @@ If there is more than one direct parent for a child table (multiple inheritance), this number tells the order in which the - inherited columns are to be arranged. The count starts at 1 + inherited columns are to be arranged. The count starts at 1. @@ -3041,7 +3047,7 @@ SQL) and true for user-defined languages. Currently, pg_dump still uses this to determine which languages need to be dumped, but this might be - replaced by a different mechanism in the future + replaced by a different mechanism in the future. @@ -3053,7 +3059,7 @@ True if this is a trusted language, which means that it is believed not to grant access to anything outside the normal SQL execution environment. Only superusers can create functions in untrusted - languages + languages. @@ -3077,7 +3083,7 @@ This references a function that is responsible for executing inline anonymous code blocks ( blocks). - Zero if inline blocks are not supported + Zero if inline blocks are not supported. @@ -3088,7 +3094,7 @@ This references a language validator function that is responsible for checking the syntax and validity of new functions when they - are created. Zero if no validator is provided + are created. Zero if no validator is provided. @@ -3133,7 +3139,7 @@ pg_largeobject was publicly readable and could be used to obtain the OIDs (and contents) of all large objects in the system. This is no longer the case; use - pg_largeobject_metadata + pg_largeobject_metadata to obtain a list of large object OIDs. @@ -3172,7 +3178,7 @@ Actual data stored in the large object. - This will never be more than LOBLKSIZE bytes and might be less + This will never be more than LOBLKSIZE bytes and might be less. @@ -3201,7 +3207,7 @@ The catalog pg_largeobject_metadata holds metadata associated with large objects. The actual large object data is stored in - pg_largeobject. + pg_largeobject. 
@@ -3222,7 +3228,7 @@ lomowneroidpg_authid.oid - Owner of the largeobject + Owner of the large object @@ -3693,13 +3699,13 @@ tmplinline text - Name of anonymous-block handler function, or NULL if none + Name of anonymous-block handler function, or null if none tmplvalidator text - Name of validator function, or NULL if none + Name of validator function, or null if none @@ -3853,7 +3859,7 @@ Function returns null if any call argument is null. In that case the function won't actually be called at all. Functions that are not strict must be prepared to handle - null inputs + null inputs. @@ -3913,7 +3919,7 @@ An array with the data types of the function arguments. This includes only input arguments (including INOUT and VARIADIC arguments), and thus represents - the call signature of the function + the call signature of the function. @@ -3927,7 +3933,7 @@ INOUT arguments); however, if all the arguments are IN arguments, this field will be null. Note that subscripting is 1-based, whereas for historical reasons - proargtypes is subscripted from 0 + proargtypes is subscripted from 0. @@ -3945,7 +3951,7 @@ If all the arguments are IN arguments, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes + proallargtypes not proargtypes. @@ -3958,7 +3964,7 @@ Arguments without a name are set to empty strings in the array. If none of the arguments have a name, this field will be null. Note that subscripts correspond to positions of - proallargtypes not proargtypes + proallargtypes not proargtypes. @@ -3972,7 +3978,7 @@ pronargdefaults elements, corresponding to the last N input arguments (i.e., the last N proargtypes positions). - If none of the arguments have defaults, this field will be null + If none of the arguments have defaults, this field will be null. @@ -3984,7 +3990,7 @@ This tells the function handler how to invoke the function. 
It might be the actual source code of the function for interpreted languages, a link symbol, a file name, or just about anything - else, depending on the implementation language/call convention + else, depending on the implementation language/call convention. @@ -3994,7 +4000,7 @@ Additional information about how to invoke the function. - Again, the interpretation is language-specific + Again, the interpretation is language-specific. @@ -4276,7 +4282,7 @@ For a table column, this is the column number (the objid and classid refer to the - table itself). For all other object types, this column is zero + table itself). For all other object types, this column is zero. @@ -4548,7 +4554,7 @@ of rows in the table; for example, a column in which values appear about twice on the average could be represented by stadistinct = -0.5. - A zero value means the number of distinct values is unknown + A zero value means the number of distinct values is unknown. @@ -4559,7 +4565,7 @@ A code number indicating the kind of statistics stored in the Nth slot of the - pg_statistic row + pg_statistic row. @@ -4571,7 +4577,7 @@ An operator used to derive the statistics stored in the Nth slot. For example, a histogram slot would show the < operator - that defines the sort order of the data + that defines the sort order of the data. @@ -4581,7 +4587,7 @@ Numerical statistics of the appropriate kind for the - Nth slot, or NULL if the slot + Nth slot, or null if the slot kind does not involve numerical values @@ -4592,11 +4598,11 @@ Column data values of the appropriate kind for the - Nth slot, or NULL if the slot + Nth slot, or null if the slot kind does not store any data values. Each array's element values are actually of the specific column's data type, so there is no way to define these columns' type more specifically than - anyarray + anyarray. 
@@ -4810,7 +4816,7 @@ tgattr int2vector pg_attribute.attnum - column numbers, if trigger is column-specific; otherwise an + Column numbers, if trigger is column-specific; otherwise an empty array @@ -4826,7 +4832,7 @@ text Expression tree (in nodeToString() - representation) for the trigger's WHEN condition, or NULL + representation) for the trigger's WHEN condition, or null if none @@ -5319,7 +5325,7 @@ where Datum is 8 bytes). Variable-length types are always passed by reference. Note that typbyval can be false even if the - length would allow pass-by-value + length would allow pass-by-value. @@ -5335,7 +5341,7 @@ e for an enum type, or p for a pseudo-type. See also typrelid and - typbasetype + typbasetype. @@ -5347,7 +5353,7 @@ typcategory is an arbitrary classification of data types that is used by the parser to determine which implicit casts should be preferred. - See + See . @@ -5369,7 +5375,7 @@ True if the type is defined, false if this is a placeholder entry for a not-yet-defined type. When typisdefined is false, nothing - except the type name, namespace, and OID can be relied on + except the type name, namespace, and OID can be relied on. @@ -5380,7 +5386,7 @@ Character that separates two values of this type when parsing array input. Note that the delimiter is associated with the array - element data type, not the array data type + element data type, not the array data type. @@ -5396,7 +5402,7 @@ pg_class entry doesn't really represent a table, but it is needed anyway for the type's pg_attribute entries to link to.) - Zero for non-composite types + Zero for non-composite types. @@ -5418,7 +5424,7 @@ its internal representation must be some number of values of the typelem data type with no other data. Variable-length array types have a header defined by the array - subroutines + subroutines. 
@@ -5479,7 +5485,7 @@ typanalyze regproc pg_proc.oid - Custom ANALYZE function, or 0 to use the standard function + Custom ANALYZE function, or 0 to use the standard function @@ -5565,7 +5571,7 @@ typnotnull represents a not-null - constraint on a type. Used for domains only + constraint on a type. Used for domains only. @@ -5576,7 +5582,7 @@ If this is a domain (see typtype), then typbasetype identifies the type that this - one is based on. Zero if this type is not a domain + one is based on. Zero if this type is not a domain. @@ -5587,7 +5593,7 @@ Domains use typtypmod to record the typmod to be applied to their base type (-1 if base type does not use a - typmod). -1 if this type is not a domain + typmod). -1 if this type is not a domain. @@ -5600,7 +5606,7 @@ for a domain that is an array (that is, typbasetype is an array type; the domain's typelem will match the base type's typelem). - Zero for types other than domains over array types + Zero for types other than domains over array types. @@ -5611,7 +5617,7 @@ If typdefaultbin is not null, it is the nodeToString() representation of a default expression for the type. This is - only used for domains + only used for domains. @@ -5627,7 +5633,7 @@ typdefaultbin is null and typdefault is not, then typdefault is the external representation of the type's default value, which might be fed to the type's input - converter to produce a constant + converter to produce a constant. @@ -5766,7 +5772,7 @@ text[] - User mapping specific options, as keyword=value strings. 
+ User mapping specific options, as keyword=value strings @@ -6126,7 +6132,7 @@ tablespace name pg_tablespace.spcname - Name of tablespace containing index (NULL if default for database) + Name of tablespace containing index (null if default for database) indexdef @@ -6193,7 +6199,7 @@ text - type of the lockable object: + Type of the lockable object: relation, extend, page, @@ -6212,7 +6218,7 @@ OID of the database in which the object exists, or zero if the object is a shared object, or - NULL if the object is a transaction ID + null if the object is a transaction ID @@ -6220,7 +6226,7 @@ oid pg_class.oid - OID of the relation, or NULL if the object is not + OID of the relation, or null if the object is not a relation or part of a relation @@ -6229,7 +6235,7 @@ integer - Page number within the relation, or NULL if the object + Page number within the relation, or null if the object is not a tuple or relation page @@ -6238,7 +6244,7 @@ smallint - Tuple number within the page, or NULL if the object is not a tuple + Tuple number within the page, or null if the object is not a tuple @@ -6246,7 +6252,7 @@ text - Virtual ID of a transaction, or NULL if the object is not a + Virtual ID of a transaction, or null if the object is not a virtual transaction ID @@ -6255,7 +6261,7 @@ xid - ID of a transaction, or NULL if the object is not a transaction ID + ID of a transaction, or null if the object is not a transaction ID @@ -6263,7 +6269,7 @@ oid pg_class.oid - OID of the system catalog containing the object, or NULL if the + OID of the system catalog containing the object, or null if the object is not a general database object @@ -6272,7 +6278,7 @@ oid any OID column - OID of the object within its system catalog, or NULL if the + OID of the object within its system catalog, or null if the object is not a general database object. 
For advisory locks it is used to distinguish the two key spaces (1 for an int8 key, 2 for two @@ -6287,7 +6293,7 @@ For a table column, this is the column number (the classid and objid refer to the table itself). For all other object types, this column is - zero. NULL if the object is not a general database object + zero. Null if the object is not a general database object @@ -6304,7 +6310,7 @@ Process ID of the server process holding or awaiting this - lock. NULL if the lock is held by a prepared transaction + lock. Null if the lock is held by a prepared transaction. @@ -6452,7 +6458,7 @@ this is the PREPARE statement submitted by the client. For prepared statements created via the frontend/backend protocol, this is the text of the prepared - statement itself + statement itself. @@ -6469,7 +6475,7 @@ The expected parameter types for the prepared statement in the form of an array of regtype. The OID corresponding to an element of this array can be obtained by casting the - regtype value to oid + regtype value to oid. @@ -6675,7 +6681,7 @@ For roles that can log in, this sets maximum number of concurrent - connections this role can make. -1 means no limit + connections this role can make. -1 means no limit. 
@@ -6691,7 +6697,7 @@ timestamptz Password expiry time (only used for password authentication); - NULL if no expiration + null if no expiration @@ -6845,19 +6851,19 @@ min_val text - Minimum allowed value of the parameter (NULL for non-numeric + Minimum allowed value of the parameter (null for non-numeric values) max_val text - Maximum allowed value of the parameter (NULL for non-numeric + Maximum allowed value of the parameter (null for non-numeric values) enumvals text[] - Allowed values of an enum parameter (NULL for non-enum + Allowed values of an enum parameter (null for non-enum values) @@ -6875,16 +6881,16 @@ sourcefile text - Configuration file the current value was set in (NULL for + Configuration file the current value was set in (null for values set from sources other than configuration files, or when - examined by a non-superuser). - Helpful when using configuration include directives + examined by a non-superuser); + helpful when using include directives in configuration files sourceline integer Line number within the configuration file the current value was - set at (NULL for values set from sources other than configuration files, + set at (null for values set from sources other than configuration files, or when examined by a non-superuser) @@ -7101,7 +7107,7 @@ likely to increase as the table grows; the positive form is used when the column seems to have a fixed number of possible values.) For example, -1 indicates a unique column in which the number of distinct - values is the same as the number of rows + values is the same as the number of rows. @@ -7110,9 +7116,9 @@ anyarray - A list of the most common values in the column. (NULL if + A list of the most common values in the column. (Null if no values seem to be more common than any others.) - For some datatypes such as tsvector, this is a list of + For some data types such as tsvector, this is a list of the most common element values rather than values of the type itself. 
@@ -7124,8 +7130,8 @@ A list of the frequencies of the most common values or elements, i.e., number of occurrences of each divided by total number of rows. - (NULL when most_common_vals is.) - For some datatypes such as tsvector, it can also store some + (Null when most_common_vals is.) + For some data types such as tsvector, it can also store some additional information, making it longer than the most_common_vals array. @@ -7139,7 +7145,7 @@ A list of values that divide the column's values into groups of approximately equal population. The values in most_common_vals, if present, are omitted from this - histogram calculation. (This column is NULL if the column data type + histogram calculation. (This column is null if the column data type does not have a < operator or if the most_common_vals list accounts for the entire population.) @@ -7155,7 +7161,7 @@ logical ordering of the column values. This ranges from -1 to +1. When the value is near -1 or +1, an index scan on the column will be estimated to be cheaper than when it is near zero, due to reduction - of random access to the disk. (This column is NULL if the column data + of random access to the disk. (This column is null if the column data type does not have a < operator.) @@ -7220,25 +7226,25 @@ tablespace name pg_tablespace.spcname - Name of tablespace containing table (NULL if default for database) + Name of tablespace containing table (null if default for database) hasindexes boolean pg_class.relhasindex - true if table has (or recently had) any indexes + True if table has (or recently had) any indexes hasrules boolean pg_class.relhasrules - true if table has (or once had) rules + True if table has (or once had) rules hastriggers boolean pg_class.relhastriggers - true if table has (or once had) triggers + True if table has (or once had) triggers @@ -7509,7 +7515,7 @@ User mapping specific options, as keyword=value strings, if the current user is the owner of the foreign - server, else null. 
+ server, else null diff --git a/doc/src/sgml/chkpass.sgml b/doc/src/sgml/chkpass.sgml index 016ba92f86..865300f4e6 100644 --- a/doc/src/sgml/chkpass.sgml +++ b/doc/src/sgml/chkpass.sgml @@ -1,4 +1,4 @@ - + chkpass @@ -32,7 +32,7 @@ passwords without re-encrypting them. If you want the encrypted password without the colon then use the raw() function. This allows you to use the - type with things like Apache's Auth_PostgreSQL module. + type with things like Apache's Auth_PostgreSQL module. @@ -43,7 +43,7 @@ - Note that the chkpass data type is not indexable. + Note that the chkpass data type is not indexable. + citext @@ -62,7 +62,7 @@ SELECT * FROM tab WHERE lower(col) = LOWER(?); lower case characters is dependent on the rules of the LC_CTYPE locale setting. Again, this behavior is identical to the use of lower in queries. But because it's - done transparently by the datatype, you don't have to remember to do + done transparently by the data type, you don't have to remember to do anything special in your queries. @@ -90,8 +90,8 @@ SELECT * FROM users WHERE nick = 'Larry'; The SELECT statement will return one tuple, even though - the nick column was set to larry and the query - was for Larry. + the nick column was set to larry and the query + was for Larry. @@ -184,7 +184,7 @@ SELECT * FROM users WHERE nick = 'Larry'; citext is not as efficient as text because the - operator functions and the btree comparison functions must make copies + operator functions and the B-tree comparison functions must make copies of the data and convert it to lower case for comparisons. It is, however, slightly more efficient than using lower to get case-insensitive matching. 
diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index d403a10868..44e1f47a7e 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -1,4 +1,4 @@ - + Client Authentication @@ -80,7 +80,7 @@ A record is made up of a number of fields which are separated by spaces and/or tabs. Fields can contain white space if the field value is quoted. - Quoting one of the keywords in a database or username field (e.g., + Quoting one of the keywords in a database or user name field (e.g., all or replication) makes the word lose its special character, and just match a database or user with that name. @@ -578,10 +578,10 @@ local db1,db2,@demodbs all md5 - Username maps + User name maps - Username maps + User name maps @@ -589,10 +589,10 @@ local db1,db2,@demodbs all md5 the name of the operating system user that initiated the connection might not be the same as the database user he needs to connect as. In this case, a user name map can be applied to map the operating system - username to a database user. To use username mapping, specify + user name to a database user. To use user name mapping, specify map=map-name in the options field in pg_hba.conf. This option is - supported for all authentication methods that receive external usernames. + supported for all authentication methods that receive external user names. Since different mappings might be needed for different connections, the name of the map to be used is specified in the map-name parameter in pg_hba.conf @@ -600,7 +600,7 @@ local db1,db2,@demodbs all md5 - Username maps are defined in the ident map file, which by default is named + User name maps are defined in the ident map file, which by default is named pg_ident.confpg_ident.conf and is stored in the cluster's data directory. 
(It is possible to place the map file @@ -636,13 +636,13 @@ local db1,db2,@demodbs all md5 expression can include a single capture, or parenthesized subexpression, which can then be referenced in the database-username field as \1 (backslash-one). This allows the mapping of - multiple usernames in a single line, which is particularly useful for + multiple user names in a single line, which is particularly useful for simple syntax substitutions. For example, these entries mymap /^(.*)@mydomain\.com$ \1 mymap /^(.*)@otherdomain\.com$ guest - will remove the domain part for users with system usernames that end with + will remove the domain part for users with system user names that end with @mydomain.com, and allow any user whose system name ends with @otherdomain.com to log in as guest. @@ -652,7 +652,7 @@ mymap /^(.*)@otherdomain\.com$ guest Keep in mind that by default, a regular expression can match just part of a string. It's usually wise to use ^ and $, as shown in the above example, to force the match to be to the entire - system username. + system user name. @@ -835,7 +835,7 @@ omicron bryanh guest1 If set to 1, the realm name from the authenticated user principal is included in the system user name that's passed through - username mapping (). This is + user name mapping (). This is useful for handling users from multiple realms. @@ -845,10 +845,10 @@ omicron bryanh guest1 map - Allows for mapping between system and database usernames. See + Allows for mapping between system and database user names. See for details. For a Kerberos principal username/hostbased@EXAMPLE.COM, the - username used for mapping is username/hostbased + user name used for mapping is username/hostbased if include_realm is disabled, and username/hostbased@EXAMPLE.COM if include_realm is enabled. @@ -862,7 +862,7 @@ omicron bryanh guest1 Sets the realm to match user principal names against. If this parameter is set, only users of that realm will be accepted. 
If it is not set, - users of any realm can connect, subject to whatever username mapping + users of any realm can connect, subject to whatever user name mapping is done. @@ -906,7 +906,7 @@ omicron bryanh guest1 If set to 1, the realm name from the authenticated user principal is included in the system user name that's passed through - username mapping (). This is + user name mapping (). This is useful for handling users from multiple realms. @@ -916,7 +916,7 @@ omicron bryanh guest1 map - Allows for mapping between system and database usernames. See + Allows for mapping between system and database user names. See for details. @@ -928,7 +928,7 @@ omicron bryanh guest1 Sets the realm to match user principal names against. If this parameter is set, only users of that realm will be accepted. If it is not set, - users of any realm can connect, subject to whatever username mapping + users of any realm can connect, subject to whatever user name mapping is done. @@ -1006,13 +1006,13 @@ omicron bryanh guest1 Client principals must have their PostgreSQL database user name as their first component, for example - pgusername@realm. Alternatively, you can use a username + pgusername@realm. Alternatively, you can use a user name mapping to map from the first component of the principal name to the database user name. By default, the realm of the client is not checked by PostgreSQL. If you have cross-realm authentication enabled and need to verify the realm, use the krb_realm parameter, or enable include_realm - and use username mapping to check the realm. + and use user name mapping to check the realm. @@ -1041,7 +1041,7 @@ omicron bryanh guest1 principal matching the requested database user name. For example, for database user name fred, principal fred@EXAMPLE.COM would be able to connect. To also allow - principal fred/users.example.com@EXAMPLE.COM, use a username + principal fred/users.example.com@EXAMPLE.COM, use a user name map, as described in . 
@@ -1063,7 +1063,7 @@ omicron bryanh guest1 map - Allows for mapping between system and database usernames. See + Allows for mapping between system and database user names. See for details. @@ -1075,7 +1075,7 @@ omicron bryanh guest1 If set to 1, the realm name from the authenticated user principal is included in the system user name that's passed through - username mapping (). This is + user name mapping (). This is useful for handling users from multiple realms. @@ -1087,7 +1087,7 @@ omicron bryanh guest1 Sets the realm to match user principal names against. If this parameter is set, only users of that realm will be accepted. If it is not set, - users of any realm can connect, subject to whatever username mapping + users of any realm can connect, subject to whatever user name mapping is done. @@ -1119,7 +1119,7 @@ omicron bryanh guest1 The ident authentication method works by obtaining the client's operating system user name and using it as the allowed database user - name (with an optional username mapping). + name (with an optional user name mapping). The determination of the client's user name is the security-critical point, and it works differently depending on the connection type, as described below. @@ -1132,7 +1132,7 @@ omicron bryanh guest1 map - Allows for mapping between system and database usernames. See + Allows for mapping between system and database user names. See for details. @@ -1245,7 +1245,7 @@ omicron bryanh guest1 In the second mode, the server first binds to the LDAP directory with - a fixed username and password, specified with ldapbinduser + a fixed user name and password, specified with ldapbinduser and ldapbinddn, and performs a search for the user trying to log in to the database. If no user and password is configured, an anonymous bind will be attempted to the directory. 
The search will be @@ -1295,7 +1295,7 @@ omicron bryanh guest1 ldapprefix - String to prepend to the username when forming the DN to bind as, + String to prepend to the user name when forming the DN to bind as, when doing simple bind authentication. @@ -1304,7 +1304,7 @@ omicron bryanh guest1 ldapsuffix - String to append to the username when forming the DN to bind as, + String to append to the user name when forming the DN to bind as, when doing simple bind authentication. @@ -1340,7 +1340,7 @@ omicron bryanh guest1 ldapsearchattribute - Attribute to match against the username in the search when doing + Attribute to match against the user name in the search when doing search+bind authentication. @@ -1464,9 +1464,9 @@ ldapserver=ldap.example.net ldapprefix="cn=" ldapsuffix=", dc=example, dc=net" the client provide a valid certificate. No password prompt will be sent to the client. The cn (Common Name) attribute of the certificate - will be compared to the requested database username, and if they match - the login will be allowed. Username mapping can be used to allow - cn to be different from the database username. + will be compared to the requested database user name, and if they match + the login will be allowed. User name mapping can be used to allow + cn to be different from the database user name. @@ -1477,7 +1477,7 @@ ldapserver=ldap.example.net ldapprefix="cn=" ldapsuffix=", dc=example, dc=net" map - Allows for mapping between system and database usernames. See + Allows for mapping between system and database user names. See for details. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 965988eff4..f7a004c238 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1,4 +1,4 @@ - + Server Configuration @@ -243,7 +243,7 @@ SET ENABLE_SEQSCAN TO OFF; Specifies the configuration file for - username mapping + user name mapping (customarily called pg_ident.conf). This parameter can only be set at server start. 
@@ -1495,7 +1495,7 @@ SET ENABLE_SEQSCAN TO OFF; one transaction is determined by the setting in effect when it commits. It is therefore possible, and useful, to have some transactions commit synchronously and others asynchronously. - For example, to make a single multi-statement transaction commit + For example, to make a single multistatement transaction commit asynchronously when the default is the opposite, issue SET LOCAL synchronous_commit TO OFF within the transaction. @@ -3374,9 +3374,8 @@ local0.* /var/log/postgresql fields to displayed messages. TERSE excludes the logging of DETAIL, HINT, QUERY, and CONTEXT error information. - VERBOSE output includes the SQLSTATE error - code and the source code file name, function name, + VERBOSE output includes the SQLSTATE error + code (see also ) and the source code file name, function name, and line number that generated the error. Only superusers can change this setting. @@ -5404,7 +5403,7 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir' the server. The default value of NAMEDATALEN is 64; therefore the default max_identifier_length is 63 bytes, which - can be less than 63 characters when using multi-byte encodings. + can be less than 63 characters when using multibyte encodings. @@ -5715,7 +5714,7 @@ plruby.use_strict = true # generates error: unknown class name If on, emit information about lock usage. Information dumped includes the type of lock operation, the type of lock and the unique identifier of the object being locked or unlocked. Also included - are bitmasks for the lock types already granted on this object as + are bit masks for the lock types already granted on this object as well as for the lock types awaited on this object. For each lock type a count of the number of granted locks and waiting locks is also dumped as well as the totals. 
An example of the log file output @@ -5735,7 +5734,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) wait(0) type(INVALID) Details of the structure being dumped may be found in - src/include/storage/lock.h + src/include/storage/lock.h. This parameter is only available if the LOCK_DEBUG @@ -5829,7 +5828,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) If set, dumps information about all current locks when a - DeadLockTimeout occurs. + deadlock timeout occurs. This parameter is only available if the LOCK_DEBUG @@ -5847,7 +5846,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) If set, logs system resource usage statistics (memory and CPU) on - various btree operations. + various B-tree operations. This parameter is only available if the BTREE_BUILD_STATS @@ -5893,7 +5892,7 @@ LOG: CleanUpLock: deleting: lock(0xb7acd844) id(24688,24696,0,0,0,1) to the log. The default is WARNING. Note that LOG has a different rank here than in client_min_messages. - Parameter should be set in the postgresql.conf only. + Parameter should be set in postgresql.conf only. diff --git a/doc/src/sgml/contrib-spi.sgml b/doc/src/sgml/contrib-spi.sgml index 62ef9ef2a2..8f4383734d 100644 --- a/doc/src/sgml/contrib-spi.sgml +++ b/doc/src/sgml/contrib-spi.sgml @@ -1,4 +1,4 @@ - + spi @@ -133,8 +133,8 @@ CREATE TABLE mytab ( set_timetravel() allows you to turn time-travel on or off for a table. - set_timetravel('mytab', 1) will turn TT ON for table mytab. - set_timetravel('mytab', 0) will turn TT OFF for table mytab. + set_timetravel('mytab', 1) will turn TT ON for table mytab. + set_timetravel('mytab', 0) will turn TT OFF for table mytab. In both cases the old status is reported. While TT is off, you can modify the start_date and stop_date columns freely. 
Note that the on/off status is local to the current database session — fresh sessions will diff --git a/doc/src/sgml/cube.sgml b/doc/src/sgml/cube.sgml index 357f0c6ff7..5da13017cf 100644 --- a/doc/src/sgml/cube.sgml +++ b/doc/src/sgml/cube.sgml @@ -1,4 +1,4 @@ - + cube @@ -9,7 +9,7 @@ This module implements a data type cube for - representing multi-dimensional cubes. + representing multidimensional cubes. @@ -98,7 +98,7 @@ The cube module includes a GiST index operator class for cube values. - The operators supported by the GiST opclass are shown in . + The operators supported by the GiST operator class are shown in .
@@ -140,7 +140,7 @@ respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric - datatypes!) + data types!) @@ -339,7 +339,7 @@ cube_inter In all binary operations on differently-dimensioned cubes, I assume the - lower-dimensional one to be a cartesian projection, i. e., having zeroes + lower-dimensional one to be a Cartesian projection, i. e., having zeroes in place of coordinates omitted in the string representation. The above examples are equivalent to: diff --git a/doc/src/sgml/cvs.sgml b/doc/src/sgml/cvs.sgml index 58061405e2..3836c2ab91 100644 --- a/doc/src/sgml/cvs.sgml +++ b/doc/src/sgml/cvs.sgml @@ -1,4 +1,4 @@ - + @@ -35,14 +35,14 @@ - Using git is the most flexible way to work with the source, and it + Using Git is the most flexible way to work with the source, and it allows you to work offline without having constant access to the project servers. - rsync based cvs also lets you work offline, but - lacks many of the other advantages of git. + rsync based CVS also lets you work offline, but + lacks many of the other advantages of Git. - Our Wiki, and , has additional details on working with CVS and Git. @@ -52,7 +52,7 @@ Getting The Source Via <productname>Git</> - With git you will make a copy of the entire code repository + With Git you will make a copy of the entire code repository to your local machine, so you will have access to all history and branches offline. This is the fastest and most flexible way to develop or test patches. @@ -63,35 +63,35 @@ - You will need an installed version of git, which you can get + You will need an installed version of Git, which you can get from . Many systems also have a recent - version of git installed by default, or available in their + version of Git installed by default, or available in their package repository system. 
- To being using the git repository, make a clone of the official mirror: + To begin using the Git repository, make a clone of the official mirror: git clone git://git.postgresql.org/git/postgresql.git This will copy the full repository to your local machine, so it may take - a while to complete, especially if you have a slow internet connection. + a while to complete, especially if you have a slow Internet connection. - The git mirror can also be reached via the http protocol in case for example - a firewall is blocking access to the git protocol. Just replace the URL + The Git mirror can also be reached via the HTTP protocol in case for example + a firewall is blocking access to the Git protocol. Just replace the URL like: git clone http://git.postgresql.org/git/postgresql.git - The http protocol is less efficient than the git protocol, so it will be + The HTTP protocol is less efficient than the Git protocol, so it will be slightly slower to use. @@ -108,7 +108,7 @@ git fetch - git can do a lot more things than just fetch the source. For + Git can do a lot more things than just fetch the source. For more information, consult the man pages for the product, or the website at . @@ -226,7 +226,7 @@ cvs update CVS repository. To work around that deficiency, use cvsutils, which is packaged in several operating systems, and is available in source form at , or use git + url="http://www.red-bean.com/cvsutils/">, or use Git or another system designed to work offline. @@ -265,7 +265,7 @@ rsync -avzH --delete anoncvs.postgresql.org::pgsql-cvs cvsroot/ For full instructions, see the "rsync" section in the - pgbuildfarm instructions. + PostgreSQL Build Farm instructions. 
diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 8251b3b4be..f6ec7d38e9 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1,4 +1,4 @@ - + Data Types @@ -3784,7 +3784,7 @@ SELECT to_tsvector('english', 'The Fat Rats'); A tsquery value stores lexemes that are to be - searched for, and combines them honoring the boolean operators + searched for, and combines them honoring the Boolean operators & (AND), | (OR), and ! (NOT). Parentheses can be used to enforce grouping of the operators: diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml index 567782c4ce..3530fc16b5 100644 --- a/doc/src/sgml/dblink.sgml +++ b/doc/src/sgml/dblink.sgml @@ -1,4 +1,4 @@ - + dblink @@ -413,7 +413,7 @@ SELECT * extension.) This allows the system to understand what * should expand to, and what proname in the WHERE clause refers to, in advance of trying - to execute the function. At runtime, an error will be thrown + to execute the function. At run time, an error will be thrown if the actual query result from the remote database does not have the same number of columns shown in the FROM clause. The column names need not match, however, and dblink @@ -1320,7 +1320,7 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex Return Value - Returns setof (notify_name text, be_pid int, extra text), or an empty set if none. + Returns setof (notify_name text, be_pid int, extra text), or an empty set if none. @@ -1615,7 +1615,7 @@ dblink_get_pkey(text relname) returns setof dblink_pkey_results Returns one row for each primary key field, or no rows if the relation - has no primary key. The result rowtype is defined as + has no primary key. 
The result row type is defined as CREATE TYPE dblink_pkey_results AS (position int, colname text); diff --git a/doc/src/sgml/diskusage.sgml b/doc/src/sgml/diskusage.sgml index aa64e4228e..0c7f544e26 100644 --- a/doc/src/sgml/diskusage.sgml +++ b/doc/src/sgml/diskusage.sgml @@ -1,4 +1,4 @@ - + Monitoring Disk Usage @@ -52,7 +52,7 @@ SELECT pg_relation_filepath(oid), relpages FROM pg_class WHERE relname = 'custom Each page is typically 8 kilobytes. (Remember, relpages is only updated by VACUUM, ANALYZE, and - a few DDL commands such as CREATE INDEX.) The file pathname + a few DDL commands such as CREATE INDEX.) The file path name is of interest if you want to examine the table's disk file directly. diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml index 41dfc2abc8..170b78f325 100644 --- a/doc/src/sgml/docguide.sgml +++ b/doc/src/sgml/docguide.sgml @@ -1,4 +1,4 @@ - + Documentation @@ -241,7 +241,7 @@ It's possible that the ports do not update the main catalog file in /usr/local/share/sgml/catalog.ports or order - isn't proper . Be sure to have the following lines in begining of file: + isn't proper. Be sure to have the following lines at the beginning of the file: CATALOG "openjade/catalog" CATALOG "iso8879/catalog" @@ -306,7 +306,7 @@ make install SGML_CATALOG_FILES to point to the file whenever you use jade later on. (This method is also an option if OpenJade is already - installed and you want to install the rest of the tool chain + installed and you want to install the rest of the toolchain locally.) diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml index a732b1d54a..82378de630 100644 --- a/doc/src/sgml/earthdistance.sgml +++ b/doc/src/sgml/earthdistance.sgml @@ -1,4 +1,4 @@ - + earthdistance @@ -12,7 +12,7 @@ calculating great circle distances on the surface of the Earth. The one described first depends on the cube package (which must be installed before earthdistance can be - installed). 
The second one is based on the built-in point datatype, + installed). The second one is based on the built-in point data type, using longitude and latitude for the coordinates. @@ -38,7 +38,7 @@ The radius of the Earth is obtained from the earth() function. It is given in meters. But by changing this one function you can change the module to use some other units, or to use a different value of - the radius that you feel is more appropiate. + the radius that you feel is more appropriate. diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index dff5d1372b..e70a941838 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1,4 +1,4 @@ - + <application>ECPG</application> - Embedded <acronym>SQL</acronym> in C @@ -825,7 +825,7 @@ numeric *PGTYPESnumeric_from_asc(char *str, char **endptr); 592.49E07 or -32.84e-4. If the value could be parsed successfully, a valid pointer is returned, - else the NULL pointer. At the moment ecpg always parses the complete + else the NULL pointer. At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. You can safely set endptr to NULL. @@ -1127,14 +1127,14 @@ date PGTYPESdate_from_timestamp(timestamp dt); date PGTYPESdate_from_asc(char *str, char **endptr); The function receives a C char* string str and a pointer to - a C char* string endptr. At the moment ecpg always parses + a C char* string endptr. At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. You can safely set endptr to NULL. Note that the function always assumes MDY-formatted dates and there is - currently no variable to change that within ecpg. + currently no variable to change that within ECPG. shows the allowed input formats. 
@@ -1401,8 +1401,8 @@ int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); - fmt - result + Format + Result @@ -1464,7 +1464,7 @@ int PGTYPESdate_fmt_asc(date dDate, char *fmtstring, char *outbuf); PGTYPESdate_defmt_asc - Use a format mask to convert a C char* string to a value of type + Use a format mask to convert a C char* string to a value of type date. int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); @@ -1491,9 +1491,9 @@ int PGTYPESdate_defmt_asc(date *d, char *fmt, char *str); - fmt - str - result + Format + String + Result @@ -1592,21 +1592,21 @@ timestamp PGTYPEStimestamp_from_asc(char *str, char **endptr); The function receives the string to parse (str) and a pointer to a C char* (endptr). - At the moment ecpg always parses + At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. You can safely set endptr to NULL. The function returns the parsed timestamp on success. On error, - PGTYPESInvalidTimestamp is returned and errno is + PGTYPESInvalidTimestamp is returned and errno is set to PGTYPES_TS_BAD_TIMESTAMP. See for important notes on this value. In general, the input string can contain any combination of an allowed date specification, a whitespace character and an allowed time - specification. Note that timezones are not supported by ecpg. It can + specification. Note that timezones are not supported by ECPG. It can parse them but does not apply any calculation as the PostgreSQL server does for example. Timezone specifiers are silently discarded. @@ -2146,7 +2146,7 @@ interval *PGTYPESinterval_from_asc(char *str, char **endptr); The function parses the input string str and returns a pointer to an allocated interval variable. - At the moment ecpg always parses + At the moment ECPG always parses the complete string and so it currently does not support to store the address of the first invalid character in *endptr. 
You can safely set endptr to NULL. @@ -2195,7 +2195,7 @@ int PGTYPESinterval_copy(interval *intvlsrc, interval *intvldest); a maximal precision of 30 significant digits. In contrast to the numeric type which can be created on the heap only, the decimal type can be created either on the stack or on the heap (by means of the functions - PGTYPESdecimal_new() and PGTYPESdecimal_free(). There are a lot of other + PGTYPESdecimal_new() and PGTYPESdecimal_free(). There are a lot of other functions that deal with the decimal type in the Informix compatibility mode described in . @@ -2231,7 +2231,7 @@ void PGTYPESdecimal_free(decimal *var); - errno values of pgtypeslib + errno values of pgtypeslib @@ -2351,7 +2351,7 @@ void PGTYPESdecimal_free(decimal *var); - Special constants of pgtypeslib + Special constants of pgtypeslib @@ -2361,7 +2361,7 @@ void PGTYPESdecimal_free(decimal *var); A value of type timestamp representing an invalid time stamp. This is returned by the function PGTYPEStimestamp_from_asc on parse error. - Note that due to the internal representation of the timestamp datatype, + Note that due to the internal representation of the timestamp data type, PGTYPESInvalidTimestamp is also a valid timestamp at the same time. It is set to 1899-12-31 23:59:59. In order to detect errors, make sure that your application does not only test @@ -2421,13 +2421,13 @@ EXEC SQL DEALLOCATE DESCRIPTOR identifier; EXEC SQL FETCH NEXT FROM mycursor INTO SQL DESCRIPTOR mydesc; - If the resultset is empty, the Descriptor Area will still contain + If the result set is empty, the Descriptor Area will still contain the metadata from the query, i.e. the field names. 
For not yet executed prepared queries, the DESCRIBE - statement can be used to get the metadata of the resultset: + statement can be used to get the metadata of the result set: EXEC SQL BEGIN DECLARE SECTION; char *sql_stmt = "SELECT * FROM table1"; @@ -2449,7 +2449,7 @@ EXEC SQL DESCRIBE stmt1 INTO SQL DESCRIPTOR mydesc; In DESCRIBE and FETCH statements, the INTO and USING keywords can be - used to similarly: they produce the resultset and the metadata in a + used to similarly: they produce the result set and the metadata in a Descriptor Area. @@ -2648,8 +2648,8 @@ EXEC SQL GET DESCRIPTOR mydesc VALUE 1 :id = DATA; An SQLDA Descriptor Area is a C language structure which can be also used - to get the resultset and the metadata of a query. One structure stores one - record from the resultset. + to get the result set and the metadata of a query. One structure stores one + record from the result set. EXEC SQL include sqlda.h; sqlda_t *mysqlda; @@ -2703,7 +2703,7 @@ typedef struct sqlda_struct sqlda_t; The allocated data for an SQLDA structure is variable as it depends on the - number of fields in a resultset and also depends on the length of the string + number of fields in a result set and also depends on the length of the string data values in a record. The individual fields of the SQLDA structure are: @@ -2742,7 +2742,7 @@ typedef struct sqlda_struct sqlda_t; sqld - It contains the number of fields in a resultset. + It contains the number of fields in a result set. @@ -2759,7 +2759,7 @@ typedef struct sqlda_struct sqlda_t; sqlvar - This is the array of the fields in the resultset. The fields are: + This is the array of the fields in the result set. The fields are: @@ -2777,7 +2777,7 @@ typedef struct sqlda_struct sqlda_t; sqllen - It contains the binary length of the field. E.g. 4 bytes for ECPGt_int. + It contains the binary length of the field. E.g. 4 bytes for ECPGt_int. 
@@ -2851,7 +2851,7 @@ struct sqlname <productname>Informix</productname> compatibility mode - ecpg can be run in a so-called Informix compatibility mode. If + ecpg can be run in a so-called Informix compatibility mode. If this mode is active, it tries to behave as if it were the Informix precompiler for Informix E/SQL. Generally spoken this will allow you to use the dollar sign instead of the EXEC SQL primitive to introduce @@ -2865,20 +2865,20 @@ $COMMIT; - There are two compatibility modes: INFORMIX, INFORMIX_SE + There are two compatibility modes: INFORMIX, INFORMIX_SE When linking programs that use this compatibility mode, remember to link - against libcompat that is shipped with ecpg. + against libcompat that is shipped with ECPG. Besides the previously explained syntactic sugar, the Informix compatibility mode ports some functions for input, output and transformation of data as - well as embedded SQL statements known from E/SQL to ecpg. + well as embedded SQL statements known from E/SQL to ECPG. Informix compatibility mode is closely connected to the pgtypeslib library - of ecpg. pgtypeslib maps SQL data types to data types within the C host + of ECPG. pgtypeslib maps SQL data types to data types within the C host program and most of the additional functions of the Informix compatibility mode allow you to operate on those C host program types. Note however that the extent of the compatibility is limited. It does not try to copy Informix @@ -2888,7 +2888,7 @@ $COMMIT; some of the data types are different. For example, PostgreSQL's datetime and interval types do not know about ranges like for example YEAR TO MINUTE so you won't - find support in ecpg for that either. + find support in ECPG for that either. @@ -2916,7 +2916,7 @@ EXEC SQL FETCH MYCUR INTO :userid; This statement closes the current connection. 
In fact, this is a - synonym for ECPG's DISCONNECT CURRENT.: $CLOSE DATABASE; /* close the current connection */ EXEC SQL CLOSE DATABASE; @@ -2929,11 +2929,11 @@ EXEC SQL CLOSE DATABASE; Due to the differences how ECPG works compared to Informix's ESQL/C (i.e. which steps - are purely grammar transformations and which steps rely on the underlying runtime library) + are purely grammar transformations and which steps rely on the underlying run-time library) there is no FREE cursor_name statement in ECPG. This is because in ECPG, DECLARE CURSOR doesn't translate to a function call into - the runtime library that uses to the cursor name. This means that there's no runtime - bookkeeping of SQL cursors in the ECPG runtime library, only in the PostgreSQL server. + the run-time library that uses the cursor name. This means that there's no run-time + bookkeeping of SQL cursors in the ECPG run-time library, only in the PostgreSQL server. @@ -3020,7 +3020,7 @@ typedef struct sqlda_compat sqlda_t; desc_name - Unused, filled with zerobytes. + Unused, filled with zero-bytes. @@ -3038,7 +3038,7 @@ typedef struct sqlda_compat sqlda_t; desc_next - Pointer to the next SQLDA structure if the resultset contains more than one records. + Pointer to the next SQLDA structure if the result set contains more than one record. @@ -3126,7 +3126,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) sqlformat - Reserved in Informix, value of PQfformat() for the field. + Reserved in Informix, value of PQfformat() for the field. @@ -3155,7 +3155,7 @@ if (*(int2 *)sqldata->sqlvar[i].sqlind != 0) sqlxid - Extended type of the field, result of PQftype(). + Extended type of the field, result of PQftype(). @@ -3243,9 +3243,9 @@ int decadd(decimal *arg1, decimal *arg2, decimal *sum); (arg1), a pointer to the second operand of type decimal (arg2) and a pointer to a value of type decimal that will contain the sum (sum). On success, the function returns 0. 
- ECPG_INFORMIX_NUM_OVERFLOW is returned in case of overflow and - ECPG_INFORMIX_NUM_UNDERFLOW in case of underflow. -1 is returned for - other failures and errno is set to the respective errno number of the + ECPG_INFORMIX_NUM_OVERFLOW is returned in case of overflow and + ECPG_INFORMIX_NUM_UNDERFLOW in case of underflow. -1 is returned for + other failures and errno is set to the respective errno number of the pgtypeslib. @@ -3530,9 +3530,9 @@ int dectoint(decimal *np, int *ip); is returned. - Note that the ecpg implementation differs from the Informix + Note that the ECPG implementation differs from the Informix implementation. Informix limits an integer to the range from -32767 to - 32767, while the limits in the ecpg implementation depend on the + 32767, while the limits in the ECPG implementation depend on the architecture (-INT_MAX .. INT_MAX). @@ -3556,9 +3556,9 @@ int dectolong(decimal *np, long *lngp); is returned. - Note that the ecpg implementation differs from the Informix + Note that the ECPG implementation differs from the Informix implementation. Informix limits a long integer to the range from - -2,147,483,647 to 2,147,483,647, while the limits in the ecpg + -2,147,483,647 to 2,147,483,647, while the limits in the ECPG implementation depend on the architecture (-LONG_MAX .. LONG_MAX). @@ -3584,9 +3584,9 @@ int rdatestr(date d, char *str); error. - Note that ecpg's implementation differs from the Informix + Note that ECPG's implementation differs from the Informix implementation. In Informix the format can be influenced by setting - environment variables. In ecpg however, you cannot change the output + environment variables. In ECPG however, you cannot change the output format. @@ -5129,14 +5129,14 @@ EXEC SQL UNDEF MYNUMBER; Of course you can continue to use the C versions #define and #undef in your embedded SQL program. The difference is where your defined values get evaluated. 
If you use EXEC SQL - DEFINE then the ecpg preprocessor evaluates the defines and substitutes + DEFINE then the ecpg preprocessor evaluates the defines and substitutes the values. For example if you write: EXEC SQL DEFINE MYNUMBER 12; ... EXEC SQL UPDATE Tbl SET col = MYNUMBER; - then ecpg will already do the substitution and your C compiler will never + then ecpg will already do the substitution and your C compiler will never see any name or identifier MYNUMBER. Note that you cannot use #define for a constant that you are going to use in an embedded SQL query because in this case the embedded SQL precompiler is not @@ -5145,7 +5145,7 @@ EXEC SQL UPDATE Tbl SET col = MYNUMBER; - ifdef, ifndef, else, elif and endif directives + ifdef, ifndef, else, elif, and endif directives You can use the following directives to compile code sections conditionally: diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml index 7e647cc985..c7afbb2ef7 100644 --- a/doc/src/sgml/external-projects.sgml +++ b/doc/src/sgml/external-projects.sgml @@ -1,4 +1,4 @@ - + External Projects @@ -45,7 +45,7 @@ - ecpg is included because it depends on the + ECPG is included because it depends on the server-side SQL grammar, and is therefore sensitive to changes in PostgreSQL itself. diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 28e7a68d87..042de5b6e1 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -1,4 +1,4 @@ - + Functions and Operators @@ -9497,7 +9497,7 @@ SELECT NULLIF(value, '(none)') ... Array comparisons compare the array contents element-by-element, - using the default B-Tree comparison function for the element data type. + using the default B-tree comparison function for the element data type. In multidimensional arrays the elements are visited in row-major order (last subscript varies most rapidly). 
If the contents of two arrays are equal but the dimensionality is @@ -9988,8 +9988,8 @@ SELECT NULLIF(value, '(none)') ... SELECT b1 = ANY((SELECT b2 FROM t2 ...)) FROM t1 ...; Here ANY can be considered either as introducing - a subquery, or as being an aggregate function, if the sub-select - returns one row with a boolean value. + a subquery, or as being an aggregate function, if the subquery + returns one row with a Boolean value. Thus the standard name cannot be given to these aggregates. @@ -11224,8 +11224,8 @@ AND > or >=, or has semantics similar to one of these. (To be specific, an operator - can be a row comparison operator if it is a member of a B-Tree operator - class, or is the negator of the = member of a B-Tree operator + can be a row comparison operator if it is a member of a B-tree operator + class, or is the negator of the = member of a B-tree operator class.) @@ -13199,7 +13199,7 @@ postgres=# select pg_start_backup('label_goes_here'); 0/D4445B8 (1 row) - There is an optional boolean second parameter. If true, + There is an optional second parameter of type boolean. If true, it specifies executing pg_start_backup as quickly as possible. This forces an immediate checkpoint which will cause a spike in I/O operations, slowing any concurrently executing queries. @@ -13576,7 +13576,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); pg_relation_filepath is similar to - pg_relation_filenode, but it returns the entire file pathname + pg_relation_filenode, but it returns the entire file path name (relative to the database cluster's data directory PGDATA) of the relation. diff --git a/doc/src/sgml/fuzzystrmatch.sgml b/doc/src/sgml/fuzzystrmatch.sgml index d8dc9c70b2..3d27740444 100644 --- a/doc/src/sgml/fuzzystrmatch.sgml +++ b/doc/src/sgml/fuzzystrmatch.sgml @@ -1,4 +1,4 @@ - + fuzzystrmatch @@ -15,7 +15,7 @@ At present, fuzzystrmatch does not work well with - multi-byte encodings (such as UTF-8). 
+ multibyte encodings (such as UTF-8). diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml index 2241dd5d9a..5318e8295a 100644 --- a/doc/src/sgml/gin.sgml +++ b/doc/src/sgml/gin.sgml @@ -1,4 +1,4 @@ - + GIN Indexes @@ -62,11 +62,10 @@ The four methods that an operator class for GIN must provide are: - - int compare(Datum a, Datum b) + int compare(Datum a, Datum b) Compares keys (not indexed values!) and returns an integer less than @@ -77,7 +76,7 @@ - Datum *extractValue(Datum inputValue, int32 *nkeys) + Datum *extractValue(Datum inputValue, int32 *nkeys) Returns an array of keys given a value to be indexed. The @@ -87,8 +86,8 @@ - Datum *extractQuery(Datum query, int32 *nkeys, - StrategyNumber n, bool **pmatch, Pointer **extra_data) + Datum *extractQuery(Datum query, int32 *nkeys, + StrategyNumber n, bool **pmatch, Pointer **extra_data) Returns an array of keys given a value to be queried; that is, @@ -109,7 +108,7 @@ so the index scan can be skipped entirely. pmatch is an output argument for use when partial match is supported. To use it, extractQuery must allocate - an array of *nkeys booleans and store its address at + an array of *nkeys Booleans and store its address at *pmatch. Each element of the array should be set to TRUE if the corresponding key requires partial match, FALSE if not. 
If *pmatch is set to NULL then GIN assumes partial match @@ -133,8 +132,8 @@ - bool consistent(bool check[], StrategyNumber n, Datum query, - int32 nkeys, Pointer extra_data[], bool *recheck) + bool consistent(bool check[], StrategyNumber n, Datum query, + int32 nkeys, Pointer extra_data[], bool *recheck) Returns TRUE if the indexed value satisfies the query operator with @@ -156,19 +155,15 @@ - - Optionally, an operator class for GIN can supply a fifth method: - - - int comparePartial(Datum partial_key, Datum key, StrategyNumber n, - Pointer extra_data) + int comparePartial(Datum partial_key, Datum key, StrategyNumber n, + Pointer extra_data) Compare a partial-match query to an index key. Returns an integer @@ -184,8 +179,8 @@ - + To support partial match queries, an operator class must @@ -408,37 +403,37 @@ feature. The following contrib modules also contain GIN operator classes: - - btree-gin + btree_gin - B-Tree equivalent functionality for several data types + B-tree equivalent functionality for several data types - hstore + hstore Module for storing (key, value) pairs - intarray + intarray - Enhanced support for int4[] + Enhanced support for int[] - pg_trgm + pg_trgm Text similarity using trigram matching + diff --git a/doc/src/sgml/gist.sgml b/doc/src/sgml/gist.sgml index eddaaad5df..5757ce8002 100644 --- a/doc/src/sgml/gist.sgml +++ b/doc/src/sgml/gist.sgml @@ -1,4 +1,4 @@ - + GiST Indexes @@ -102,7 +102,7 @@ decompress, which allow an index to have internal tree data of a different type than the data it indexes. The leaves are to be of the indexed data type, while the other tree nodes can be of any C struct (but - you still have to follow PostgreSQL datatype rules here, + you still have to follow PostgreSQL data type rules here, see about varlena for variable sized data). If the tree's internal data type exists at the SQL level, the STORAGE option of the CREATE OPERATOR CLASS command can be used. 
@@ -561,7 +561,7 @@ my_same(PG_FUNCTION_ARGS) For historical reasons, the same function doesn't - just return a boolean result; instead it has to store the flag + just return a Boolean result; instead it has to store the flag at the location indicated by the third argument. @@ -583,58 +583,58 @@ my_same(PG_FUNCTION_ARGS) (see src/backend/access/gist/gistproc.c). The following contrib modules also contain GiST operator classes: - - btree_gist + btree_gist - B-Tree equivalent functionality for several data types + B-tree equivalent functionality for several data types - cube + cube Indexing for multidimensional cubes - hstore + hstore Module for storing (key, value) pairs - intarray + intarray RD-Tree for one-dimensional array of int4 values - ltree + ltree Indexing for tree-like structures - pg_trgm + pg_trgm Text similarity using trigram matching - seg + seg Indexing for float ranges + diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index c453c8a794..8067e57b2a 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -1,4 +1,4 @@ - + High Availability, Load Balancing, and Replication @@ -449,7 +449,7 @@ protocol to make nodes agree on a serializable transactional order. part of the query and return results to a central server where they are combined and returned to the user. Pgpool-II has this capability. Also, this can be implemented using the - PL/Proxy toolset. + PL/Proxy tool set. @@ -602,21 +602,21 @@ protocol to make nodes agree on a serializable transactional order. At startup, the standby begins by restoring all WAL available in the archive location, calling restore_command. Once it reaches the end of WAL available there and restore_command - fails, it tries to restore any WAL available in the pg_xlog directory. + fails, it tries to restore any WAL available in the pg_xlog directory. 
If that fails, and streaming replication has been configured, the standby tries to connect to the primary server and start streaming WAL - from the last valid record found in archive or pg_xlog. If that fails + from the last valid record found in archive or pg_xlog. If that fails or streaming replication is not configured, or if the connection is later disconnected, the standby goes back to step 1 and tries to restore the file from the archive again. This loop of retries from the - archive, pg_xlog, and via streaming replication goes on until the server + archive, pg_xlog, and via streaming replication goes on until the server is stopped or failover is triggered by a trigger file. Standby mode is exited and the server switches to normal operation, when a trigger file is found (trigger_file). Before failover, - any WAL immediately available in the archive or in pg_xlog will be + any WAL immediately available in the archive or in pg_xlog will be restored, but no attempt is made to connect to the master. @@ -753,7 +753,7 @@ trigger_file = '/path/to/trigger_file' too early, while the standby might still need them to catch up. If the standby falls behind too much, it needs to be reinitialized from a new base backup. If you set up a WAL archive that's accessible from the - standby, wal_keep_segments is not required as the standby can always + standby, wal_keep_segments is not required as the standby can always use the archive to catch up. @@ -1743,7 +1743,7 @@ LOG: database system is ready to accept read only connections - New oids cannot be assigned, though some UUID generators may still + New OIDs cannot be assigned, though some UUID generators may still work as long as they do not rely on writing new status to the database. 
diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml index 2ce825c2ea..a6aad2cc61 100644 --- a/doc/src/sgml/hstore.sgml +++ b/doc/src/sgml/hstore.sgml @@ -1,4 +1,4 @@ - + hstore @@ -214,7 +214,7 @@ key => NULL and <@ were called @ and ~, respectively. These names are still available, but are deprecated and will eventually be removed. Notice that the old names are reversed from the - convention formerly followed by the core geometric datatypes! + convention formerly followed by the core geometric data types! @@ -409,7 +409,7 @@ b The function populate_record is actually declared with anyelement, not record, as its first argument, - but it will reject non-record types with a runtime error. + but it will reject non-record types with a run-time error. diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index b132fff164..a17a4fd94c 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -1,4 +1,4 @@ - + Index Access Method Interface Definition @@ -189,7 +189,7 @@ aminsert (Relation indexRelation, - The function's boolean result value is significant only when + The function's Boolean result value is significant only when checkUnique is UNIQUE_CHECK_PARTIAL. In this case a TRUE result means the new entry is known unique, whereas FALSE means it might be non-unique (and a deferred uniqueness check must @@ -366,7 +366,7 @@ amgetbitmap (IndexScanDesc scan, TIDBitmap *tbm); Fetch all tuples in the given scan and add them to the caller-supplied - TIDBitmap (that is, OR the set of tuple IDs into whatever set is already + TIDBitmap (that is, OR the set of tuple IDs into whatever set is already in the bitmap). The number of tuples fetched is returned (this might be just an approximate count, for instance some AMs do not detect duplicates). 
While inserting tuple IDs into the bitmap, amgetbitmap can @@ -820,19 +820,19 @@ amrestrpos (IndexScanDesc scan); Index Cost Estimation Functions - The amcostestimate function is given a list of WHERE clauses that have + The amcostestimate function is given a list of WHERE clauses that have been determined to be usable with the index. It must return estimates of the cost of accessing the index and the selectivity of the WHERE clauses (that is, the fraction of parent-table rows that will be retrieved during the index scan). For simple cases, nearly all the work of the cost estimator can be done by calling standard routines - in the optimizer; the point of having an amcostestimate function is + in the optimizer; the point of having an amcostestimate function is to allow index access methods to provide index-type-specific knowledge, in case it is possible to improve on the standard estimates. - Each amcostestimate function must have the signature: + Each amcostestimate function must have the signature: void @@ -850,7 +850,7 @@ amcostestimate (PlannerInfo *root, - root + root The planner's information about the query being processed. @@ -859,7 +859,7 @@ amcostestimate (PlannerInfo *root, - index + index The index being considered. @@ -868,23 +868,23 @@ amcostestimate (PlannerInfo *root, - indexQuals + indexQuals List of index qual clauses (implicitly ANDed); - a NIL list indicates no qualifiers are available. + a NIL list indicates no qualifiers are available. Note that the list contains expression trees, not ScanKeys. - outer_rel + outer_rel If the index is being considered for use in a join inner indexscan, the planner's information about the outer side of the join. Otherwise - NULL. When non-NULL, some of the qual clauses will be join clauses + NULL. When non-NULL, some of the qual clauses will be join clauses with this rel rather than being simple restriction clauses. 
Also, the cost estimator should expect that the index scan will be repeated for each row of the outer rel. @@ -899,7 +899,7 @@ amcostestimate (PlannerInfo *root, - *indexStartupCost + *indexStartupCost Set to cost of index start-up processing @@ -908,7 +908,7 @@ amcostestimate (PlannerInfo *root, - *indexTotalCost + *indexTotalCost Set to total cost of index processing @@ -917,7 +917,7 @@ amcostestimate (PlannerInfo *root, - *indexSelectivity + *indexSelectivity Set to index selectivity @@ -926,7 +926,7 @@ amcostestimate (PlannerInfo *root, - *indexCorrelation + *indexCorrelation Set to correlation coefficient between index scan order and @@ -951,7 +951,7 @@ amcostestimate (PlannerInfo *root, row should usually be taken as cpu_index_tuple_cost. In addition, an appropriate multiple of cpu_operator_cost should be charged for any comparison operators invoked during index processing - (especially evaluation of the indexQuals themselves). + (especially evaluation of the indexQuals themselves). @@ -968,14 +968,14 @@ amcostestimate (PlannerInfo *root, - The indexSelectivity should be set to the estimated fraction of the parent + The indexSelectivity should be set to the estimated fraction of the parent table rows that will be retrieved during the index scan. In the case of a lossy query, this will typically be higher than the fraction of rows that actually pass the given qual conditions. - The indexCorrelation should be set to the correlation (ranging between + The indexCorrelation should be set to the correlation (ranging between -1.0 and 1.0) between the index order and the table order. This is used to adjust the estimate for the cost of fetching rows from the parent table. @@ -1009,16 +1009,16 @@ amcostestimate (PlannerInfo *root, Estimate the number of index rows that will be visited during the - scan. For many index types this is the same as indexSelectivity times + scan. 
For many index types this is the same as indexSelectivity times the number of rows in the index, but it might be more. (Note that the - index's size in pages and rows is available from the IndexOptInfo struct.) + index's size in pages and rows is available from the IndexOptInfo struct.) Estimate the number of index pages that will be retrieved during the scan. - This might be just indexSelectivity times the index's size in pages. + This might be just indexSelectivity times the index's size in pages. diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml index 6f2887bd16..a28f1dba51 100644 --- a/doc/src/sgml/indices.sgml +++ b/doc/src/sgml/indices.sgml @@ -1,4 +1,4 @@ - + Indexes @@ -92,9 +92,9 @@ CREATE INDEX test1_id_index ON test1 (id); Creating an index on a large table can take a long time. By default, - PostgreSQL allows reads (selects) to occur - on the table in parallel with index creation, but writes (INSERTs, - UPDATEs, DELETEs) are blocked until the index build is finished. + PostgreSQL allows reads (SELECT statements) to occur + on the table in parallel with index creation, but writes (INSERT, + UPDATE, DELETE) are blocked until the index build is finished. In production environments this is often unacceptable. It is possible to allow writes to occur in parallel with index creation, but there are several caveats to be aware of — diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 79a1ce4696..1e314242fa 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -1,4 +1,4 @@ - + <![%standalone-include[<productname>PostgreSQL</>]]> @@ -2471,7 +2471,7 @@ createlang: language installation failed: ERROR: could not load library "/opt/d <listitem> <para> - The GNU make command is called "make" not "gmake". + The GNU make command is called <command>make</command>, not <command>gmake</command>. </para> </listitem> @@ -2982,7 +2982,7 @@ configure ... 
LDFLAGS="-R /usr/sfw/lib:/opt/sfw/lib:/usr/local/lib" On Solaris 7 and older, the 64-bit version of libc has a buggy <function>vsnprintf</function> routine, which leads to erratic core dumps in PostgreSQL. The simplest known workaround is to - force PostgreSQL to use its own version of vsnprintf rather than + force PostgreSQL to use its own version of <function>vsnprintf</function> rather than the library copy. To do this, after you run <command>configure</command> edit a file produced by <command>configure</command>: @@ -3044,7 +3044,7 @@ LIBOBJS = snprintf.o </para> <para> - If you see the linking of the postgres executable abort with an + If you see the linking of the <command>postgres</command> executable abort with an error message like: <screen> Undefined first referenced diff --git a/doc/src/sgml/intarray.sgml b/doc/src/sgml/intarray.sgml index 0f304a403f..43a882ea0d 100644 --- a/doc/src/sgml/intarray.sgml +++ b/doc/src/sgml/intarray.sgml @@ -1,4 +1,4 @@ -<!-- $PostgreSQL: pgsql/doc/src/sgml/intarray.sgml,v 1.10.2.1 2010/07/29 19:34:36 petere Exp $ --> +<!-- $PostgreSQL: pgsql/doc/src/sgml/intarray.sgml,v 1.10.2.2 2010/08/17 04:37:17 petere Exp $ --> <sect1 id="intarray"> <title>intarray @@ -207,7 +207,7 @@ <@ were respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention - formerly followed by the core geometric datatypes!) + formerly followed by the core geometric data types!) diff --git a/doc/src/sgml/isn.sgml b/doc/src/sgml/isn.sgml index 976c3ce8a6..53a6f5f788 100644 --- a/doc/src/sgml/isn.sgml +++ b/doc/src/sgml/isn.sgml @@ -1,4 +1,4 @@ - + isn @@ -214,7 +214,7 @@ The isn module provides the standard comparison operators, - plus btree and hash indexing support for all these datatypes. In + plus B-tree and hash indexing support for all these data types. In addition there are several specialized functions; shown in . 
In this table, isn means any one of the module's data types. @@ -375,7 +375,7 @@ SELECT isbn13(id) FROM test; This module was inspired by Garrett A. Wollman's - isbn_issn code. + isbn_issn code. diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index d24ace0020..7d03a01b34 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -1,4 +1,4 @@ - + <application>libpq</application> - C Library @@ -79,13 +79,13 @@ On Windows, there is a way to improve performance if a single database connection is repeatedly started and shutdown. Internally, - libpq calls WSAStartup() and WSACleanup() for connection startup - and shutdown, respectively. WSAStartup() increments an internal - Windows library reference count which is decremented by WSACleanup(). - When the reference count is just one, calling WSACleanup() frees + libpq calls WSAStartup() and WSACleanup() for connection startup + and shutdown, respectively. WSAStartup() increments an internal + Windows library reference count which is decremented by WSACleanup(). + When the reference count is just one, calling WSACleanup() frees all resources and all DLLs are unloaded. This is an expensive operation. To avoid this, an application can manually call - WSAStartup() so resources will not be freed when the last database + WSAStartup() so resources will not be freed when the last database connection is closed. @@ -405,7 +405,7 @@ PGconn *PQconnectdbParams(const char **keywords, const char **values, int expand verify-full only try an SSL connection, verify that the server certificate is issued by a trusted CA and - that the server hostname matches that in the certificate + that the server host name matches that in the certificate @@ -471,7 +471,7 @@ PGconn *PQconnectdbParams(const char **keywords, const char **values, int expand This parameter specifies the location for the secret key used for - the client certificate. It can either specify a filename that will + the client certificate. 
It can either specify a file name that will be used instead of the default ~/.postgresql/postgresql.key, or it can specify a key obtained from an external engine (engines are @@ -931,7 +931,7 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); Parses a connection string and returns the resulting options as an - array; or returns NULL if there is a problem with the connection + array; or returns NULL if there is a problem with the connection string. This can be used to determine the PQconnectdb options in the provided connection string. The return value points to an array of @@ -945,10 +945,10 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); - If errmsg is not NULL, then *errmsg is set - to NULL on success, else to a malloc'd error string explaining + If errmsg is not NULL, then *errmsg is set + to NULL on success, else to a malloc'd error string explaining the problem. (It is also possible for *errmsg to be - set to NULL even when NULL is returned; this indicates an out-of-memory + set to NULL even when NULL is returned; this indicates an out-of-memory situation.) @@ -956,7 +956,7 @@ PQconninfoOption *PQconninfoParse(const char *conninfo, char **errmsg); After processing the options array, free it by passing it to PQconninfoFree. If this is not done, some memory is leaked for each call to PQconninfoParse. - Conversely, if an error occurs and errmsg is not NULL, + Conversely, if an error occurs and errmsg is not NULL, be sure to free the error string using PQfreemem. @@ -1952,7 +1952,7 @@ PGresult *PQdescribePrepared(PGconn *conn, const char *stmtName); - stmtName can be "" or NULL to reference + stmtName can be "" or NULL to reference the unnamed statement, otherwise it must be the name of an existing prepared statement. On success, a PGresult with status PGRES_COMMAND_OK is returned. 
The @@ -1995,7 +1995,7 @@ PGresult *PQdescribePortal(PGconn *conn, const char *portalName); - portalName can be "" or NULL to reference + portalName can be "" or NULL to reference the unnamed portal, otherwise it must be the name of an existing portal. On success, a PGresult with status PGRES_COMMAND_OK is returned. The functions @@ -3116,7 +3116,7 @@ char *PQescapeLiteral(PGconn *conn, const char *str, size_t length); - On error, PQescapeLiteral returns NULL and a suitable + On error, PQescapeLiteral returns NULL and a suitable message is stored in the conn object. @@ -3179,7 +3179,7 @@ char *PQescapeIdentifier(PGconn *conn, const char *str, size_t length); - On error, PQescapeIdentifier returns NULL and a suitable + On error, PQescapeIdentifier returns NULL and a suitable message is stored in the conn object. @@ -3232,13 +3232,13 @@ size_t PQescapeStringConn(PGconn *conn, - If the error parameter is not NULL, then + If the error parameter is not NULL, then *error is set to zero on success, nonzero on error. Presently the only possible error conditions involve invalid multibyte encoding in the source string. The output string is still generated on error, but it can be expected that the server will reject it as malformed. On error, a suitable message is stored in the - conn object, whether or not error is NULL. + conn object, whether or not error is NULL. @@ -3343,7 +3343,7 @@ unsigned char *PQescapeByteaConn(PGconn *conn, - On error, a NULL pointer is returned, and a suitable error message + On error, a null pointer is returned, and a suitable error message is stored in the conn object. Currently, the only possible error is insufficient memory for the result string. @@ -3416,7 +3416,7 @@ unsigned char *PQunescapeBytea(const unsigned char *from, size_t *to_length); to a bytea column. PQunescapeBytea converts this string representation into its binary representation. 
It returns a pointer to a buffer allocated with - malloc(), or NULL on error, and puts the size of + malloc(), or NULL on error, and puts the size of the buffer in to_length. The result must be freed using PQfreemem when it is no longer needed. @@ -3971,7 +3971,7 @@ PGcancel *PQgetCancel(PGconn *conn); PQgetCancel creates a PGcancelPGcancel object given a PGconn connection object. It will return - NULL if the given conn is NULL or an invalid + NULL if the given conn is NULL or an invalid connection. The PGcancel object is an opaque structure that is not meant to be accessed directly by the application; it can only be passed to PQcancel @@ -5056,7 +5056,7 @@ PGresult *PQmakeEmptyPGresult(PGconn *conn, ExecStatusType status); This is libpq's internal function to allocate and initialize an empty PGresult object. This - function returns NULL if memory could not be allocated. It is + function returns NULL if memory could not be allocated. It is exported because some applications find it useful to generate result objects (particularly objects with error status) themselves. If conn is not null and status @@ -5095,7 +5095,7 @@ int PQfireResultCreateEvents(PGconn *conn, PGresult *res); The conn argument is passed through to event procedures - but not used directly. It can be NULL if the event + but not used directly. It can be NULL if the event procedures won't use it. @@ -5127,7 +5127,7 @@ int PQfireResultCreateEvents(PGconn *conn, PGresult *res); Makes a copy of a PGresult object. The copy is not linked to the source result in any way and PQclear must be called when the copy is no longer - needed. If the function fails, NULL is returned. + needed. If the function fails, NULL is returned. PGresult *PQcopyResult(const PGresult *src, int flags); @@ -5171,7 +5171,7 @@ int PQsetResultAttrs(PGresult *res, int numAttributes, PGresAttDesc *attDescs); The provided attDescs are copied into the result. 
- If the attDescs pointer is NULL or + If the attDescs pointer is NULL or numAttributes is less than one, the request is ignored and the function succeeds. If res already contains attributes, the function will fail. If the function @@ -5205,8 +5205,8 @@ int PQsetvalue(PGresult *res, int tup_num, int field_num, char *value, int len); field of any existing tuple can be modified in any order. If a value at field_num already exists, it will be overwritten. If len is -1 or - value is NULL, the field value - will be set to an SQL NULL. The + value is NULL, the field value + will be set to an SQL null value. The value is copied into the result's private storage, thus is no longer needed after the function returns. If the function fails, the return value is zero. If the @@ -5234,7 +5234,7 @@ void *PQresultAlloc(PGresult *res, size_t nBytes); Any memory allocated with this function will be freed when res is cleared. If the function fails, - the return value is NULL. The result is + the return value is NULL. The result is guaranteed to be adequately aligned for any type of data, just as for malloc. @@ -5377,7 +5377,7 @@ defaultNoticeProcessor(void *arg, const char *message) life of the PGconn and all PGresults generated from it; so if used, it must point to long-lived data. In addition there is an instance data pointer, which starts - out NULL in every PGconn and PGresult. + out NULL in every PGconn and PGresult. This pointer can be manipulated using the PQinstanceData, PQsetInstanceData, @@ -5682,11 +5682,11 @@ int PQregisterEventProc(PGconn *conn, PGEventProc proc, event is fired. Its memory address is also used to lookup instanceData. The name argument is used to refer to the event procedure in error messages. - This value cannot be NULL or a zero-length string. The name string is + This value cannot be NULL or a zero-length string. The name string is copied into the PGconn, so what is passed need not be long-lived. 
The passThrough pointer is passed to the proc whenever an event occurs. This - argument can be NULL. + argument can be NULL. @@ -5700,9 +5700,11 @@ int PQregisterEventProc(PGconn *conn, PGEventProc proc, - Sets the conn's instanceData for proc to data. This returns non-zero - for success and zero for failure. (Failure is only possible if - the proc has not been properly registered in the conn.) + Sets the connection conn's instanceData + for procedure proc to data. This + returns non-zero for success and zero for failure. (Failure is + only possible if proc has not been properly + registered in conn.) int PQsetInstanceData(PGconn *conn, PGEventProc proc, void *data); @@ -5720,8 +5722,10 @@ int PQsetInstanceData(PGconn *conn, PGEventProc proc, void *data); - Returns the conn's instanceData associated with proc, or NULL - if there is none. + Returns the + connection conn's instanceData + associated with procedure proc, + or NULL if there is none. void *PQinstanceData(const PGconn *conn, PGEventProc proc); @@ -5739,9 +5743,11 @@ void *PQinstanceData(const PGconn *conn, PGEventProc proc); - Sets the result's instanceData for proc to data. This returns non-zero - for success and zero for failure. (Failure is only possible if the - proc has not been properly registered in the result.) + Sets the result's instanceData + for proc to data. This returns + non-zero for success and zero for failure. (Failure is only + possible if proc has not been properly registered + in the result.) int PQresultSetInstanceData(PGresult *res, PGEventProc proc, void *data); @@ -5759,7 +5765,7 @@ int PQresultSetInstanceData(PGresult *res, PGEventProc proc, void *data); - Returns the result's instanceData associated with proc, or NULL + Returns the result's instanceData associated with proc, or NULL if there is none. 
@@ -6481,7 +6487,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) libpq will verify that the server is trustworthy by checking the certificate chain up to a trusted certificate authority (CA). If sslmode is set to verify-full, - libpq will also verify that the server hostname matches its + libpq will also verify that the server host name matches its certificate. The SSL connection will fail if the server certificate cannot be verified. verify-full is recommended in most security-sensitive environments. @@ -6489,11 +6495,11 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) In verify-full mode, the cn (Common Name) attribute - of the certificate is matched against the hostname. If the cn + of the certificate is matched against the host name. If the cn attribute starts with an asterisk (*), it will be treated as a wildcard, and will match all characters except a dot (.). This means the certificate will not match subdomains. - If the connection is made using an IP address instead of a hostname, the + If the connection is made using an IP address instead of a host name, the IP address will be matched (without doing any DNS lookups). @@ -6585,7 +6591,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) Eavesdropping If a third party can examine the network traffic between the client and the server, it can read both connection information (including - the username and password) and the data that is passed. SSL + the user name and password) and the data that is passed. SSL uses encryption to prevent this. 
diff --git a/doc/src/sgml/lo.sgml b/doc/src/sgml/lo.sgml index 2015762b9d..0e11652088 100644 --- a/doc/src/sgml/lo.sgml +++ b/doc/src/sgml/lo.sgml @@ -1,4 +1,4 @@ - + lo @@ -19,7 +19,7 @@ One of the problems with the JDBC driver (and this affects the ODBC driver also), is that the specification assumes that references to BLOBs (Binary - Large OBjects) are stored within a table, and if that entry is changed, the + Large Objects) are stored within a table, and if that entry is changed, the associated BLOB is deleted from the database. diff --git a/doc/src/sgml/ltree.sgml b/doc/src/sgml/ltree.sgml index e405b6c475..258f703a6d 100644 --- a/doc/src/sgml/ltree.sgml +++ b/doc/src/sgml/ltree.sgml @@ -1,4 +1,4 @@ - + ltree @@ -42,7 +42,7 @@ - The ltree module provides several datatypes: + The ltree module provides several data types: diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 3c94a9ccdd..befe437db5 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -1,4 +1,4 @@ - + Monitoring Database Activity @@ -693,7 +693,7 @@ postgres: user database host pg_stat_get_activity(integer) setof record - Returns a record of information about the backend with the specified pid, or + Returns a record of information about the backend with the specified PID, or one record for each active backend in the system if NULL is specified. The fields returned are a subset of those in the pg_stat_activity view. 
diff --git a/doc/src/sgml/oid2name.sgml b/doc/src/sgml/oid2name.sgml index f490c2bd13..6cfa2d6e6b 100644 --- a/doc/src/sgml/oid2name.sgml +++ b/doc/src/sgml/oid2name.sgml @@ -1,4 +1,4 @@ - + oid2name @@ -105,7 +105,7 @@ username - username to connect as + user name to connect as @@ -122,8 +122,8 @@ using @@ -385,7 +385,7 @@ pgbench options dbname login - The username to connect as + The user name to connect as @@ -434,7 +434,7 @@ pgbench options dbname - The format of a script file is one SQL command per line; multi-line + The format of a script file is one SQL command per line; multiline SQL commands are not supported. Empty lines and lines beginning with -- are ignored. Script file lines can also be meta commands, which are interpreted by pgbench @@ -613,7 +613,7 @@ END; With the -l option, pgbench writes the time - taken by each transaction to a logfile. The logfile will be named + taken by each transaction to a log file. The log file will be named pgbench_log.nnn, where nnn is the PID of the pgbench process. If the -j option is 2 or higher, creating multiple worker diff --git a/doc/src/sgml/pgcrypto.sgml b/doc/src/sgml/pgcrypto.sgml index dc0cd643bc..8c37e9e5f0 100644 --- a/doc/src/sgml/pgcrypto.sgml +++ b/doc/src/sgml/pgcrypto.sgml @@ -1,4 +1,4 @@ - + pgcrypto @@ -519,7 +519,7 @@ pgp_sym_decrypt_bytea(msg bytea, psw text [, options text ]) returns bytea Decrypt a symmetric-key-encrypted PGP message. - Decrypting bytea data with pgp_sym_decrypt is disallowed. + Decrypting bytea data with pgp_sym_decrypt is disallowed. This is to avoid outputting invalid character data. Decrypting originally textual data with pgp_sym_decrypt_bytea is fine. @@ -561,7 +561,7 @@ pgp_pub_decrypt_bytea(msg bytea, key bytea [, psw text [, options text ]]) retur options, you need to give an empty password. - Decrypting bytea data with pgp_pub_decrypt is disallowed. + Decrypting bytea data with pgp_pub_decrypt is disallowed. This is to avoid outputting invalid character data. 
Decrypting originally textual data with pgp_pub_decrypt_bytea is fine. @@ -621,7 +621,7 @@ armor(data bytea) returns text dearmor(data text) returns bytea - These functions wrap/unwrap binary data into PGP Ascii Armor format, + These functions wrap/unwrap binary data into PGP ASCII-armor format, which is basically Base64 with CRC and additional formatting. @@ -825,13 +825,13 @@ gpg --list-secret-keys - To export a public key in ascii-armor format: + To export a public key in ASCII-armor format: gpg -a --export KEYID > public.key - To export a secret key in ascii-armor format: + To export a secret key in ASCII-armor format: gpg -a --export-secret-keys KEYID > secret.key @@ -1206,7 +1206,7 @@ gen_random_bytes(count integer) returns bytea - Jean-Luc Cooke Fortuna-based /dev/random driver for Linux. + Jean-Luc Cooke Fortuna-based /dev/random driver for Linux. diff --git a/doc/src/sgml/pgfreespacemap.sgml b/doc/src/sgml/pgfreespacemap.sgml index 49edd7a4fe..388df1d2a0 100644 --- a/doc/src/sgml/pgfreespacemap.sgml +++ b/doc/src/sgml/pgfreespacemap.sgml @@ -1,4 +1,4 @@ - + pg_freespacemap @@ -33,7 +33,6 @@ Returns the amount of free space on the page of the relation, specified by blkno, according to the FSM. - (blkno). @@ -56,7 +55,7 @@ The values stored in the free space map are not exact. They're rounded - to precision of 1/256th of BLCKSZ (32 bytes with default BLCKSZ), and + to precision of 1/256th of BLCKSZ (32 bytes with default BLCKSZ), and they're not kept fully up-to-date as tuples are inserted and updated. diff --git a/doc/src/sgml/pgstandby.sgml b/doc/src/sgml/pgstandby.sgml index 1969cb6b29..1bca013ec4 100644 --- a/doc/src/sgml/pgstandby.sgml +++ b/doc/src/sgml/pgstandby.sgml @@ -1,4 +1,4 @@ - + pg_standby @@ -202,7 +202,7 @@ pg_standby option ... archiv Specify a trigger file whose presence should cause failover. 
- It is recommended that you use a structured filename to + It is recommended that you use a structured file name to avoid confusion as to which server is being triggered when multiple servers exist on the same system; for example /tmp/pgsql.trigger.5432. @@ -326,7 +326,7 @@ recovery_end_command = 'del C:\pgsql.trigger.5442' The copy command on Windows sets the final file size - before the file is completely copied, which would ordinarly confuse + before the file is completely copied, which would ordinarily confuse pg_standby. Therefore pg_standby waits sleeptime seconds once it sees the proper file size. GNUWin32's cp diff --git a/doc/src/sgml/pgstattuple.sgml b/doc/src/sgml/pgstattuple.sgml index e538e4f502..a00dfecf9c 100644 --- a/doc/src/sgml/pgstattuple.sgml +++ b/doc/src/sgml/pgstattuple.sgml @@ -1,4 +1,4 @@ - + pgstattuple @@ -140,7 +140,7 @@ free_percent | 1.95 pgstatindex returns a record showing information - about a btree index. For example: + about a B-tree index. For example: test=> SELECT * FROM pgstatindex('pg_cast_oid_index'); -[ RECORD 1 ]------+------ @@ -174,7 +174,7 @@ leaf_fragmentation | 0 version integer - Btree version number + B-tree version number diff --git a/doc/src/sgml/pgtrgm.sgml b/doc/src/sgml/pgtrgm.sgml index 0e94f88686..a1dfe063f3 100644 --- a/doc/src/sgml/pgtrgm.sgml +++ b/doc/src/sgml/pgtrgm.sgml @@ -1,4 +1,4 @@ - + pg_trgm @@ -130,7 +130,7 @@ operator classes that allow you to create an index over a text column for the purpose of very fast similarity searches. These index types support the % similarity operator (and no other operators, so you may - want a regular btree index too). + want a regular B-tree index too). 
diff --git a/doc/src/sgml/pgupgrade.sgml b/doc/src/sgml/pgupgrade.sgml index 0791c98750..bc96fbeb6e 100644 --- a/doc/src/sgml/pgupgrade.sgml +++ b/doc/src/sgml/pgupgrade.sgml @@ -1,4 +1,4 @@ - + pg_upgrade @@ -8,21 +8,21 @@ - pg_upgrade (formerly called pg_migrator) allows data + pg_upgrade (formerly called pg_migrator) allows data stored in PostgreSQL data files to be migrated to a later PostgreSQL major version without the data dump/reload typically required for major version upgrades, e.g. from 8.4.7 to the current major release - of PostgreSQL. It is not required for minor version upgrades, e.g - 9.0.1 -> 9.0.4. + of PostgreSQL. It is not required for minor version upgrades, e.g. from + 9.0.1 to 9.0.4. pg_upgrade works because, though new features are - regularly added to Postgres major releases, the internal data storage + regularly added to PostgreSQL major releases, the internal data storage format rarely changes. pg_upgrade does its best to make sure the old and new clusters are binary-compatible, e.g. by checking for compatible compile-time settings. It is important that - any external modules are also binary compatibile, though this cannot + any external modules are also binary compatible, though this cannot be checked by pg_upgrade. @@ -144,50 +144,43 @@ Upgrade Steps - - - - Optionally move the old cluster - + + + Optionally move the old cluster If you are using a version-specific installation directory, e.g. - /opt/PostgreSQL/8.4, you do not need to move the old cluster. The - one-click installers all use version-specific install directories. + /opt/PostgreSQL/8.4, you do not need to move the old cluster. The + one-click installers all use version-specific installation directories. If your installation directory is not version-specific, e.g. 
- /usr/local/pgsql, it is necessary to move the current PostgreSQL install + /usr/local/pgsql, it is necessary to move the current PostgreSQL install directory so it does not interfere with the new PostgreSQL installation. Once the current PostgreSQL server is shut down, it is safe to rename the - PostgreSQL install directory; assuming the old directory is - /usr/local/pgsql, you can do: + PostgreSQL installation directory; assuming the old directory is + /usr/local/pgsql, you can do: mv /usr/local/pgsql /usr/local/pgsql.old to rename the directory. + - - - - - For source installs, build the new version - + + For source installs, build the new version - Build the new PostgreSQL source with configure flags that are compatible + Build the new PostgreSQL source with configure flags that are compatible with the old cluster. pg_upgrade will check pg_controldata to make sure all settings are compatible before starting the upgrade. - + - - - Install the new PostgreSQL binaries - + + Install the new PostgreSQL binaries Install the new server's binaries and support files. You can use the @@ -197,75 +190,67 @@ mv /usr/local/pgsql /usr/local/pgsql.old For source installs, if you wish to install the new server in a custom - location, use 'prefix': + location, use the prefix variable: gmake prefix=/usr/local/pgsql.new install - + - + + Install pg_upgrade + Install pg_upgrade and pg_upgrade_support in the new PostgreSQL cluster - + - - - Initialize the new PostgreSQL cluster - + + Initialize the new PostgreSQL cluster - Initialize the new cluster ,initdb. - Again, use compatible initdb + Initialize the new cluster using initdb. + Again, use compatible initdb flags that match the old cluster. Many prebuilt installers do this step automatically. There is no need to start the new cluster. 
+ - - - - - Install custom shared object files (or DLLs) - + + Install custom shared object files Install any custom shared object files (or DLLs) used by the old cluster - into the new cluster, e.g. pgcrypto.so, whether they are from /contrib + into the new cluster, e.g. pgcrypto.so, whether they are from contrib or some other source. Do not install the schema definitions, e.g. - pgcrypto.sql --- these will be migrated from the old cluster. + pgcrypto.sql, because these will be migrated from the old cluster. - + - - - Adjust authentication - + + Adjust authentication - pg_upgrade will connect to the old and new servers several times, + pg_upgrade will connect to the old and new servers several times, so you might want to set authentication to trust in pg_hba.conf, or if using md5 authentication, use a ~/.pgpass file (see ) to avoid being prompted repeatedly for a password. - + - - - Stop both servers - + + Stop both servers Make sure both database servers are stopped using on Unix, e.g.: -pg_ctl --pgdata /opt/PostgreSQL/8.4 stop -pg_ctl --pgdata /opt/PostgreSQL/9.0 stop +pg_ctl -D /opt/PostgreSQL/8.4 stop +pg_ctl -D /opt/PostgreSQL/9.0 stop or on Windows @@ -281,27 +266,25 @@ NET STOP postgresql-9.0 NET STOP pgsql-8.3 (PostgreSQL 8.3 and older used a different service name) - + - - - Run pg_upgrade - + + Run <application>pg_upgrade</> Always run the pg_upgrade binary in the new server, not the old one. pg_upgrade requires the specification of the old and new cluster's - PGDATA and executable (/bin) directories. You can also specify separate + data and executable (bin) directories. You can also specify separate user and port values, and whether you want the data linked instead of copied (the default). If you use linking, the migration will be much faster (no data copying), but you will no longer be able to access your old cluster once you start the new cluster after the upgrade. See - pg_upgrade --help for a full list of options. 
+ pg_upgrade --help for a full list of options. For Windows users, you must be logged into an administrative account, and - then start a shell as the 'postgres' user and set the proper path: + then start a shell as the postgres user and set the proper path: RUNAS /USER:postgres "CMD.EXE" @@ -318,10 +301,10 @@ pg_upgrade.exe --new-bindir "C:/Program Files/PostgreSQL/9.0/bin" - Once started, pg_upgrade will verify the two clusters are compatible - and then do the migration. You can use pg_upgrade @@ -330,31 +313,27 @@ pg_upgrade.exe - If an error occurs while restoring the database schema, pg_upgrade will - exit and you will have to revert to the old cluster as outlined in step - #15 below. To try pg_upgrade again, you will need to modify the old + If an error occurs while restoring the database schema, pg_upgrade will + exit and you will have to revert to the old cluster as outlined in + below. To try pg_upgrade again, you will need to modify the old cluster so the pg_upgrade schema restore succeeds. If the problem is a - /contrib module, you might need to uninstall the /contrib module from + contrib module, you might need to uninstall the contrib module from the old cluster and install it in the new cluster after the migration, assuming the module is not being used to store user data. - + - - - Restore pg_hba.conf - + + Restore <filename>pg_hba.conf</> If you modified pg_hba.conf to use trust, restore its original authentication settings. - + - - - Post-Migration processing - + + Post-migration processing If any post-migration processing is required, pg_upgrade will issue @@ -379,76 +358,81 @@ psql --username postgres --file script.sql postgres scripts can be accessed immediately. - + - - - Statistics - - + + Statistics + - Because optimizer statistics are not transferred by pg_upgrade, you will + Because optimizer statistics are not transferred by pg_upgrade, you will be instructed to run a command to regenerate that information at the end of the migration. 
- - + - - - Delete old cluster - + + Delete old cluster Once you are satisfied with the upgrade, you can delete the old cluster's data directories by running the script mentioned when - pg_upgrade completes. You will need to manually delete the old install - directories, e.g. /bin, /share. - - - - - - Reverting to old cluster - - - - If, after running pg_upgrade, you wish to revert to the old cluster, - there are several options. - - - - If you ran pg_upgrade with - - - If you ran pg_upgrade with - - - If you ran pg_upgrade without_ - - - + pg_upgrade completes. You can also delete the + old installation directories + (e.g. bin, share). + + + + + Reverting to old cluster + + + If, after running pg_upgrade, you wish to revert to the old cluster, + there are several options: + + + + + If you ran pg_upgrade + with + + + + + If you ran pg_upgrade + with + + + + + If you + ran pg_upgrade without + + + + + - Limitations in migrating <emphasis>from</> PostgreSQL 8.3 + Limitations in Migrating <emphasis>from</> PostgreSQL 8.3 Upgrading from PostgreSQL 8.3 has additional restrictions not present @@ -478,7 +462,7 @@ psql --username postgres --file script.sql postgres - a user column is of data type tsvector + a user column is of data type tsvector @@ -489,7 +473,7 @@ psql --username postgres --file script.sql postgres - an index is of type hash or gin + an index is of type hash or GIN @@ -522,7 +506,7 @@ psql --username postgres --file script.sql postgres pg_upgrade does not support migration of databases - containing these reg* system oid-referencing data types: + containing these reg* OID-referencing system data types: regproc, regprocedure, regoper, regoperator, regclass, regconfig, and regdictionary. (regtype can be migrated.) @@ -544,9 +528,9 @@ psql --username postgres --file script.sql postgres If you want to use link mode and you don't want your old cluster to be modified when the new cluster is started, make a copy of the old cluster and migrate that with link mode. 
To make a valid copy - of the old cluster, use rsync to create a dirty + of the old cluster, use rsync to create a dirty copy of the old cluster while the server is running, then shut down - the old server and run rsync again to update the copy with any + the old server and run rsync again to update the copy with any changes to make it consistent. diff --git a/doc/src/sgml/planstats.sgml b/doc/src/sgml/planstats.sgml index 1d6e52afd9..f73120dfdd 100644 --- a/doc/src/sgml/planstats.sgml +++ b/doc/src/sgml/planstats.sgml @@ -1,4 +1,4 @@ - + How the Planner Uses Statistics @@ -428,7 +428,7 @@ rows = (outer_cardinality * inner_cardinality) * selectivity the unmodified size of tenk2. It might appear from inspection of the EXPLAIN output that the estimate of join rows comes from 50 * 1, that is, the number of outer rows times - the estimated number of rows obtained by each inner indexscan on + the estimated number of rows obtained by each inner index scan on tenk2. But this is not the case: the join relation size is estimated before any particular join plan has been considered. If everything is working well then the two ways of estimating the join diff --git a/doc/src/sgml/plperl.sgml b/doc/src/sgml/plperl.sgml index aa0e5b3135..46d2ad5036 100644 --- a/doc/src/sgml/plperl.sgml +++ b/doc/src/sgml/plperl.sgml @@ -1,4 +1,4 @@ - + PL/Perl - Perl Procedural Language @@ -694,7 +694,7 @@ SELECT release_hosts_query(); Return the unescaped binary data represented by the contents of the given string, - which should be bytea encoded. + which should be bytea encoded. @@ -708,7 +708,7 @@ SELECT release_hosts_query(); encode_bytea(string) - Return the bytea encoded form of the binary data contents of the given string. + Return the bytea encoded form of the binary data contents of the given string. 
diff --git a/doc/src/sgml/plpgsql.sgml b/doc/src/sgml/plpgsql.sgml index d5ce832beb..3763a59fc5 100644 --- a/doc/src/sgml/plpgsql.sgml +++ b/doc/src/sgml/plpgsql.sgml @@ -1,4 +1,4 @@ - + <application>PL/pgSQL</application> - <acronym>SQL</acronym> Procedural Language @@ -1842,7 +1842,7 @@ END CASE; The searched form of CASE provides conditional execution - based on truth of boolean expressions. Each WHEN clause's + based on truth of Boolean expressions. Each WHEN clause's boolean-expression is evaluated in turn, until one is found that yields true. Then the corresponding statements are executed, and diff --git a/doc/src/sgml/ref/alter_opfamily.sgml b/doc/src/sgml/ref/alter_opfamily.sgml index 70f20db2b4..06034c950f 100644 --- a/doc/src/sgml/ref/alter_opfamily.sgml +++ b/doc/src/sgml/ref/alter_opfamily.sgml @@ -1,5 +1,5 @@ @@ -240,7 +240,7 @@ ALTER OPERATOR FAMILY name USING name [ IN DATABASE postgresql.conf or has been received from the postgres + postgresql.conf or has been received from the postgres command line. This only happens at login time; executing or does not cause new diff --git a/doc/src/sgml/ref/create_opclass.sgml b/doc/src/sgml/ref/create_opclass.sgml index 500a16b658..696e85f2d8 100644 --- a/doc/src/sgml/ref/create_opclass.sgml +++ b/doc/src/sgml/ref/create_opclass.sgml @@ -1,5 +1,5 @@ @@ -250,7 +250,7 @@ CREATE OPERATOR CLASS name [ DEFAUL Before PostgreSQL 8.4, the OPERATOR clause could include a RECHECK option. This is no longer supported because whether an index operator is lossy is now - determined on-the-fly at runtime. This allows efficient handling of + determined on-the-fly at run time. This allows efficient handling of cases where an operator might or might not be lossy. 
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml index 0c02d0cb91..a1192d3901 100644 --- a/doc/src/sgml/ref/create_table.sgml +++ b/doc/src/sgml/ref/create_table.sgml @@ -1,5 +1,5 @@ @@ -500,7 +500,7 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } ] TABLE ); at present this means GIN cannot be used. Although it's allowed, there is little point in using - btree or hash indexes with an exclusion constraint, because this + B-tree or hash indexes with an exclusion constraint, because this does nothing that an ordinary unique constraint doesn't do better. So in practice the access method will always be GiST. diff --git a/doc/src/sgml/ref/create_trigger.sgml b/doc/src/sgml/ref/create_trigger.sgml index 6ec2766fa4..0fac156fee 100644 --- a/doc/src/sgml/ref/create_trigger.sgml +++ b/doc/src/sgml/ref/create_trigger.sgml @@ -1,5 +1,5 @@ @@ -74,7 +74,7 @@ CREATE TRIGGER name { BEFORE | AFTE - Also, a trigger definition can specify a boolean WHEN + Also, a trigger definition can specify a Boolean WHEN condition, which will be tested to see whether the trigger should be fired. In row-level triggers the WHEN condition can examine the old and/or new values of columns of the row. Statement-level diff --git a/doc/src/sgml/ref/pg_ctl-ref.sgml b/doc/src/sgml/ref/pg_ctl-ref.sgml index 61e4e397c9..6eaa950c1e 100644 --- a/doc/src/sgml/ref/pg_ctl-ref.sgml +++ b/doc/src/sgml/ref/pg_ctl-ref.sgml @@ -1,5 +1,5 @@ @@ -402,8 +402,8 @@ PostgreSQL documentation - Default hostname or Unix-domain socket location for (used by the -w option). + Default host name or Unix-domain socket location for (used by the option). @@ -413,7 +413,7 @@ PostgreSQL documentation - Default port number for (used by the -w option). + Default port number for (used by the option). 
diff --git a/doc/src/sgml/ref/pg_dumpall.sgml b/doc/src/sgml/ref/pg_dumpall.sgml index 95a37512ee..0cc10fc4e8 100644 --- a/doc/src/sgml/ref/pg_dumpall.sgml +++ b/doc/src/sgml/ref/pg_dumpall.sgml @@ -1,5 +1,5 @@ @@ -395,8 +395,8 @@ PostgreSQL documentation Specifies the name of the database to connect to to dump global objects and discover what other databases should be dumped. If - not specified, the postgres database will be used, - and if that does not exist, template1 will be used. + not specified, the postgres database will be used, + and if that does not exist, template1 will be used. diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml index 70a8b41ee6..50f9408bb2 100644 --- a/doc/src/sgml/ref/pg_restore.sgml +++ b/doc/src/sgml/ref/pg_restore.sgml @@ -1,4 +1,4 @@ - + @@ -221,7 +221,7 @@ create indexes, or create constraints — using multiple concurrent jobs. This option can dramatically reduce the time to restore a large database to a server running on a - multi-processor machine. + multiprocessor machine. diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml index 935572be06..006c6ba40d 100644 --- a/doc/src/sgml/ref/psql-ref.sgml +++ b/doc/src/sgml/ref/psql-ref.sgml @@ -1,5 +1,5 @@ @@ -1034,7 +1034,7 @@ testdb=> If pattern is specified, only those servers whose name matches the pattern are listed. If the form \des+ is used, a - full desription of each server is shown, including the + full description of each server is shown, including the server's ACL, type, version, and options. @@ -1675,8 +1675,8 @@ lo_import 152801 Prompts the user to set variable name. An optional prompt, text, can be specified. (For multi-word - prompts, use single-quotes.) + class="parameter">text, can be specified. (For multiword + prompts, use single quotes.) 
@@ -2757,7 +2757,7 @@ testdb=> SELECT * FROM :"foo"; testdb=> \set content `cat my_file.txt` testdb=> INSERT INTO my_table VALUES (:'content'); - (Note that this still won't work if my_file.txt contains NUL bytes. + (Note that this still won't work if my_file.txt contains NUL bytes. psql does not support embedded NUL bytes in variable values.) diff --git a/doc/src/sgml/regress.sgml b/doc/src/sgml/regress.sgml index 6052db3e4a..cb58037fe5 100644 --- a/doc/src/sgml/regress.sgml +++ b/doc/src/sgml/regress.sgml @@ -1,4 +1,4 @@ - + Regression Tests @@ -165,8 +165,8 @@ psql -h primary -c "CREATE DATABASE regression" psql -h primary -f src/test/regress/sql/hs_primary_setup.sql regression Now confirm that the default connection for the tester is the standby - server under test and then run standbycheck from the regression - directory. + server under test and then run the standbycheck target from the regression + directory: cd src/test/regress gmake standbycheck diff --git a/doc/src/sgml/release-9.0.sgml b/doc/src/sgml/release-9.0.sgml index 03a0e43f19..9002ea925f 100644 --- a/doc/src/sgml/release-9.0.sgml +++ b/doc/src/sgml/release-9.0.sgml @@ -1,4 +1,4 @@ - + Release 9.0 @@ -99,7 +99,7 @@ New and enhanced security features, including RADIUS authentication, - LDAP authentication improvements, and the new checkpassword optional module + LDAP authentication improvements, and the new checkpassword optional module for testing password strength. @@ -117,7 +117,7 @@ - Add /contrib/pg_upgrade + Add contrib/pg_upgrade to support in-place upgrades from 8.3 or 8.4 to 9.0. @@ -149,14 +149,14 @@ - EXPLAIN plans are now available in JSON, XML and YAML format, and include + EXPLAIN plans are now available in JSON, XML, and YAML format, and include buffer utilization and other data not previously available. 
- The HStore optional module has been improved with new functions and greater + The hstore optional module has been improved with new functions and greater data capacity to make it a high-performance key-value store. @@ -205,7 +205,7 @@ Remove server variable regex_flavor, which was defaulted to advanced - (e.g. Perl-regex compatible) for many years. (Tom Lane) + (i.e., Perl compatible) for many years. (Tom Lane) @@ -554,7 +554,7 @@ This is particularly useful for finding MAX()/MIN() values in indexes that also - contain NULLs. + contain null values. @@ -581,7 +581,7 @@ While the Genetic Query Optimizer (GEQO) still selects random plans, it now selects the same random plans for identical queries. You can modify geqo_seed to randomize + linkend="guc-geqo-seed">geqo_seed to randomize the starting value of the random plan generator. @@ -766,7 +766,7 @@ - Now there is true multi-lingual support for PostgreSQL log messages + Now there is true multilingual support for PostgreSQL log messages on Windows. @@ -827,7 +827,7 @@ - Add boolean variable bonjour, which controls whether a Bonjour-enabled binary advertises itself via Bonjour (Tom Lane) @@ -840,7 +840,7 @@ - Add boolean variable enable_material, which controls the use of materialize nodes in the optimizer (Robert Haas) @@ -1390,7 +1390,7 @@ - Add point_ops opclass for GiST (Teodor Sigaev) + Add point_ops operator class for GiST (Teodor Sigaev) @@ -1433,7 +1433,7 @@ The variable bytea_output controls + linkend="guc-bytea-output">bytea_output controls if hex (default) or octal escapes are used for bytea output. (SWITCH DEFAULT FOR BETA? PETER) Libpq's PQescapeByteaConn() now uses the hex format @@ -1516,8 +1516,8 @@ Previously, only ASCII characters and single-byte - encodings worked properly. Other multi-byte, non-UTF-8 - encodings are still broken for case-insensitive regular expression + encodings worked properly. 
Multibyte encodings other than UTF-8 + are still broken for case-insensitive regular expression matching. @@ -1868,14 +1868,14 @@ Add server variable plperl.on_init to + linkend="guc-plperl-on-init">plperl.on_init to specify a PL/Perl Perl initialization function (Tim Bunce) plperl.on_plperl_init + linkend="guc-plperl-on-plperl-init">plperl.on_plperl_init and plperl.on_plperlu_init are also available for trusted/untrusted-specific initialization. @@ -1904,7 +1904,7 @@ This can be globally enabled with the server variable plperl.use_strict. + linkend="guc-plperl-use-strict">plperl.use_strict. @@ -1965,8 +1965,8 @@ Bytea values passed into PL/Python now are represented as - binary, rather than the Postgres bytea text format. Null - bytes are now also output properly from PL/Python. Boolean + binary, rather than the PostgreSQL bytea text format. Null + bytes are now also output properly from PL/Python. boolean and numeric value passing in PL/Python was also improved. @@ -2489,7 +2489,7 @@ These are similar to the existing all, install, and installcheck targets, but they build HTML - documentation, build and test /contrib, and test + documentation, build and test contrib, and test server-side languages and ecpg. 
@@ -2582,7 +2582,7 @@ - Allow multi-processor compilation using Microsoft Visual + Allow multiprocessor compilation using Microsoft Visual C (Magnus Hagander) @@ -2647,7 +2647,7 @@ - Improve source code test coverage, including /contrib, PL/Python, + Improve source code test coverage, including contrib, PL/Python, and PL/Perl (Peter Eisentraut, Andrew Dunstan) @@ -2839,7 +2839,7 @@ - Add /contrib/pg_upgrade + Add contrib/pg_upgrade to support in-place upgrades (Bruce Momjian) @@ -2894,8 +2894,8 @@ - Add multi-threaded option ( @@ -2908,7 +2908,7 @@ Add \shell and \setshell meta commands to /contrib/pgbench + linkend="pgbench">contrib/pgbench (Michael Paquier) @@ -2916,7 +2916,7 @@ New features for /contrib/dict_xsyn + linkend="dict-xsyn">contrib/dict_xsyn (Sergey Karpov) @@ -2929,7 +2929,7 @@ Add full text dictionary /contrib/unaccent + linkend="unaccent">contrib/unaccent (Teodor Sigaev) @@ -2943,17 +2943,17 @@ Add dblink_get_notify() - to /contrib/dblink (Marcus Kempe) + to contrib/dblink (Marcus Kempe) - This allows async notifications in dblink. + This allows asynchronous notifications in dblink. - Improve /contrib/dblinks handling of dropped columns + Improve contrib/dblinks handling of dropped columns (Tom Lane) @@ -2967,22 +2967,22 @@ Greatly increase /contrib/hstore's - length limit and add btree and hash abilities so GROUP + linkend="hstore">contrib/hstore's + length limit and add B-tree and hash abilities so GROUP BY and DISTINCT operations are possible (Andrew Gierth) New functions and operators were also added. These improvements - make HStore a full-functional key-value store embedded in PostgreSQL. + make hstore a full-functional key-value store embedded in PostgreSQL. 
Add /contrib/passwordcheck + linkend="passwordcheck">contrib/passwordcheck which can check the strength of assigned passwords (Laurenz Albe) @@ -2996,7 +2996,7 @@ Add /contrib/pg_archivecleanup + linkend="pgarchivecleanup">contrib/pg_archivecleanup tool (Simon Riggs) @@ -3009,7 +3009,7 @@ Add query text to /contrib/auto_explain + linkend="auto-explain">contrib/auto_explain output (Andrew Dunstan) @@ -3017,7 +3017,7 @@ Add buffer access counters to /contrib/pg_stat_statements + linkend="pgstatstatements">contrib/pg_stat_statements (Itagaki Takahiro) @@ -3025,7 +3025,7 @@ Update /contrib/start-scripts/linux + linkend="server-start">contrib/start-scripts/linux to use /proc/self/oom_adj to disable the Linux out-of-memory (OOM) killer (Alex diff --git a/doc/src/sgml/release.sgml b/doc/src/sgml/release.sgml index 6687cc50c9..448738e90f 100644 --- a/doc/src/sgml/release.sgml +++ b/doc/src/sgml/release.sgml @@ -1,4 +1,4 @@ - + + seg @@ -237,7 +237,7 @@ test=> select '6.25 .. 6.50'::seg as "pH"; The seg module includes a GiST index operator class for seg values. - The operators supported by the GiST opclass are shown in . + The operators supported by the GiST operator class are shown in .
@@ -308,7 +308,7 @@ test=> select '6.25 .. 6.50'::seg as "pH"; respectively called @ and ~. These names are still available, but are deprecated and will eventually be retired. Notice that the old names are reversed from the convention formerly followed by the core geometric - datatypes!) + data types!) diff --git a/doc/src/sgml/spi.sgml b/doc/src/sgml/spi.sgml index dd50665e10..033e224909 100644 --- a/doc/src/sgml/spi.sgml +++ b/doc/src/sgml/spi.sgml @@ -1,4 +1,4 @@ - + Server Programming Interface @@ -987,7 +987,7 @@ SPIPlanPtr SPI_prepare_cursor(const char * command, int < SPI_prepare_cursor is identical to SPI_prepare, except that it also allows specification - of the planner's cursor options parameter. This is a bitmask + of the planner's cursor options parameter. This is a bit mask having the values shown in nodes/parsenodes.h for the options field of DeclareCursorStmt. SPI_prepare always takes the cursor options as zero. @@ -1030,7 +1030,7 @@ SPIPlanPtr SPI_prepare_cursor(const char * command, int < int cursorOptions - integer bitmask of cursor options; zero produces default behavior + integer bit mask of cursor options; zero produces default behavior @@ -1130,7 +1130,7 @@ SPIPlanPtr SPI_prepare_params(const char * command, int cursorOptions - integer bitmask of cursor options; zero produces default behavior + integer bit mask of cursor options; zero produces default behavior @@ -1925,7 +1925,7 @@ Portal SPI_cursor_open_with_args(const char *name, int cursorOptions - integer bitmask of cursor options; zero produces default behavior + integer bit mask of cursor options; zero produces default behavior diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml index c4b38ddb6c..40e305e787 100644 --- a/doc/src/sgml/storage.sgml +++ b/doc/src/sgml/storage.sgml @@ -1,4 +1,4 @@ - + @@ -817,7 +817,7 @@ data. Empty in ordinary tables. the next. Then make sure you have the right alignment. 
If the field is a fixed width field, then all the bytes are simply placed. If it's a variable length field (attlen = -1) then it's a bit more complicated. - All variable-length datatypes share the common header structure + All variable-length data types share the common header structure struct varlena, which includes the total length of the stored value and some flag bits. Depending on the flags, the data can be either inline or in a TOAST table; diff --git a/doc/src/sgml/tablefunc.sgml b/doc/src/sgml/tablefunc.sgml index 437ca7b6f8..59aa059ae6 100644 --- a/doc/src/sgml/tablefunc.sgml +++ b/doc/src/sgml/tablefunc.sgml @@ -1,4 +1,4 @@ - + tablefunc @@ -46,7 +46,7 @@ Produces a pivot table containing row names plus N value columns, where - N is determined by the rowtype specified in the calling + N is determined by the row type specified in the calling query @@ -219,9 +219,9 @@ SELECT * FROM crosstab('...') AS ct(row_name text, category_1 text, category_2 t The FROM clause must define the output as one - row_name column (of the same datatype as the first result + row_name column (of the same data type as the first result column of the SQL query) followed by N value columns - (all of the same datatype as the third result column of the SQL query). + (all of the same data type as the third result column of the SQL query). You can set up as many output value columns as you wish. The names of the output columns are up to you. @@ -299,7 +299,7 @@ crosstabN(text sql) so that you need not write out column names and types in the calling SELECT query. 
The tablefunc module includes crosstab2, crosstab3, and - crosstab4, whose output rowtypes are defined as + crosstab4, whose output row types are defined as diff --git a/doc/src/sgml/trigger.sgml b/doc/src/sgml/trigger.sgml index a40963fd5b..d132553c13 100644 --- a/doc/src/sgml/trigger.sgml +++ b/doc/src/sgml/trigger.sgml @@ -1,4 +1,4 @@ - + Triggers @@ -141,7 +141,7 @@ - A trigger definition can also specify a boolean WHEN + A trigger definition can also specify a Boolean WHEN condition, which will be tested to see whether the trigger should be fired. In row-level triggers the WHEN condition can examine the old and/or new values of columns of the row. (Statement-level diff --git a/doc/src/sgml/unaccent.sgml b/doc/src/sgml/unaccent.sgml index 6d6a1d6b13..ff6a2989dd 100644 --- a/doc/src/sgml/unaccent.sgml +++ b/doc/src/sgml/unaccent.sgml @@ -9,7 +9,7 @@ unaccent removes accents (diacritic signs) from a lexeme. It's a filtering dictionary, that means its output is always passed to the next dictionary (if any), contrary to the standard - behavior. Currently, it supports most important accents from european + behavior. Currently, it supports most important accents from European languages. diff --git a/doc/src/sgml/vacuumlo.sgml b/doc/src/sgml/vacuumlo.sgml index c75e042f09..04b59c70d7 100644 --- a/doc/src/sgml/vacuumlo.sgml +++ b/doc/src/sgml/vacuumlo.sgml @@ -1,4 +1,4 @@ - + vacuumlo @@ -52,7 +52,7 @@ vacuumlo [options] database [database2 ... databaseN] username - Username to connect as. + User name to connect as. diff --git a/doc/src/sgml/wal.sgml b/doc/src/sgml/wal.sgml index 65cde74b3b..8a118cfece 100644 --- a/doc/src/sgml/wal.sgml +++ b/doc/src/sgml/wal.sgml @@ -1,4 +1,4 @@ - + Reliability and the Write-Ahead Log @@ -116,7 +116,7 @@ write caches. At the drive level, disable write-back caching if the drive cannot guarantee the data will be written before shutdown. You can test for reliable I/O subsystem behavior using diskchecker.pl. 
+ url="http://brad.livejournal.com/2116715.html">diskchecker.pl. diff --git a/doc/src/sgml/xindex.sgml b/doc/src/sgml/xindex.sgml index 1da74b9fa3..48c9585ce4 100644 --- a/doc/src/sgml/xindex.sgml +++ b/doc/src/sgml/xindex.sgml @@ -1,4 +1,4 @@ - + Interfacing Extensions To Indexes @@ -416,39 +416,47 @@
GIN Support Functions - + Function + Description Support Number + compare - compare - compare two keys and return an integer less than zero, zero, + compare two keys and return an integer less than zero, zero, or greater than zero, indicating whether the first key is less than, equal to, or greater than the second 1 - extractValue - extract keys from a value to be indexed + extractValue + extract keys from a value to be indexed 2 - extractQuery - extract keys from a query condition + extractQuery + extract keys from a query condition 3 - consistent - determine whether value matches query condition + consistent + determine whether value matches query condition 4 - comparePartial - (optional method) compare partial key from + comparePartial + + (optional method) compare partial key from query and key from index, and return an integer less than zero, zero, or greater than zero, indicating whether GIN should ignore this index - entry, treat the entry as a match, or stop the index scan + entry, treat the entry as a match, or stop the index scan + 5 diff --git a/doc/src/sgml/xml2.sgml b/doc/src/sgml/xml2.sgml index eaeca083a1..3ebef17c77 100644 --- a/doc/src/sgml/xml2.sgml +++ b/doc/src/sgml/xml2.sgml @@ -1,4 +1,4 @@ - + xml2 @@ -90,7 +90,7 @@ xpath_nodeset(document, query, toptag, itemtag) returns text <itemtag>Value 2....</itemtag> </toptag> - If either toptag or itemtag is an empty string, the relevant tag is omitted. + If either toptag or itemtag is an empty string, the relevant tag is omitted. @@ -102,7 +102,7 @@ xpath_nodeset(document, query) returns text - Like xpath_nodeset(document,query,toptag,itemtag) but result omits both tags. + Like xpath_nodeset(document, query, toptag, itemtag) but result omits both tags. @@ -114,7 +114,7 @@ xpath_nodeset(document, query, itemtag) returns text - Like xpath_nodeset(document,query,toptag,itemtag) but result omits toptag. + Like xpath_nodeset(document, query, toptag, itemtag) but result omits toptag. 
@@ -295,7 +295,7 @@ WHERE t.author_id = p.person_id; may not be the same as the number of input documents. The first row returned contains the first result from each query, the second row the second result from each query. If one of the queries has fewer values - than the others, NULLs will be returned instead. + than the others, null values will be returned instead. @@ -337,8 +337,8 @@ WHERE id = 1 ORDER BY doc_num, line_num - To get doc_num on every line, the solution is to use two invocations - of xpath_table and join the results: + To get doc_num on every line, the solution is to use two invocations + of xpath_table and join the results: SELECT t.*,i.doc_num FROM @@ -377,7 +377,7 @@ xslt_process(text document, text stylesheet, text paramlist) returns text This function applies the XSL stylesheet to the document and returns - the transformed result. The paramlist is a list of parameter + the transformed result. The paramlist is a list of parameter assignments to be used in the transformation, specified in the form a=1,b=2. Note that the parameter parsing is very simple-minded: parameter values cannot diff --git a/doc/src/sgml/xoper.sgml b/doc/src/sgml/xoper.sgml index 628a5a91fa..b1b82e6968 100644 --- a/doc/src/sgml/xoper.sgml +++ b/doc/src/sgml/xoper.sgml @@ -1,4 +1,4 @@ - + User-Defined Operators @@ -435,7 +435,7 @@ table1.column1 OP table2.column2 To be marked MERGES, the join operator must appear - as an equality member of a btree index operator family. + as an equality member of a btree index operator family. This is not enforced when you create the operator, since of course the referencing operator family couldn't exist yet. But the operator will not actually be used for merge joins @@ -450,7 +450,7 @@ table1.column1 OP table2.column2 if they are different) that appears in the same operator family. If this is not the case, planner errors might occur when the operator is used. 
Also, it is a good idea (but not strictly required) for - a btree operator family that supports multiple data types to provide + a btree operator family that supports multiple data types to provide equality operators for every combination of the data types; this allows better optimization. -- 2.40.0