From 2aa0476dc38f7e510b8cde627e83b4c76fa05d61 Mon Sep 17 00:00:00 2001
From: Tom Lane <tgl@sss.pgh.pa.us>
Date: Sun, 24 May 2015 15:04:10 -0400
Subject: [PATCH] Manual cleanup of pgindent results.

Fix some places where pgindent did silly stuff, often because project
style wasn't followed to begin with.  (I've not touched the atomics
headers, though.)
---
 contrib/pg_audit/pg_audit.c                  | 76 +++++++-------
 src/backend/access/tablesample/bernoulli.c   | 15 ++--
 src/backend/access/tablesample/tablesample.c |  9 ++-
 src/backend/executor/execUtils.c             |  4 +-
 src/backend/executor/nodeAgg.c               |  8 ++-
 src/backend/executor/nodeHash.c              |  8 +--
 src/backend/optimizer/plan/planner.c         | 25 +++----
 src/backend/rewrite/rowsecurity.c            |  4 +-
 src/backend/utils/adt/jsonb.c                |  2 +-
 src/backend/utils/adt/ruleutils.c            |  4 +-
 10 files changed, 68 insertions(+), 87 deletions(-)

diff --git a/contrib/pg_audit/pg_audit.c b/contrib/pg_audit/pg_audit.c
index a664d0804b..ffe13eb6b3 100644
--- a/contrib/pg_audit/pg_audit.c
+++ b/contrib/pg_audit/pg_audit.c
@@ -212,19 +212,19 @@ typedef struct
     int64       statementId;    /* Simple counter */
     int64       substatementId; /* Simple counter */
 
-    LogStmtLevel logStmtLevel;  /* From GetCommandLogLevel when possible, */
-    /* generated when not. */
+    LogStmtLevel logStmtLevel;  /* From GetCommandLogLevel when possible,
+                                 * generated when not. */
     NodeTag     commandTag;     /* same here */
     const char *command;        /* same here */
-    const char *objectType;     /* From event trigger when possible */
-    /* generated when not. */
+    const char *objectType;     /* From event trigger when possible, generated
+                                 * when not. */
     char       *objectName;     /* Fully qualified object identification */
     const char *commandText;    /* sourceText / queryString */
     ParamListInfo paramList;    /* QueryDesc/ProcessUtility parameters */
 
     bool        granted;        /* Audit role has object permissions? */
-    bool        logged;         /* Track if we have logged this event, used */
-    /* post-ProcessUtility to make sure we log */
+    bool        logged;         /* Track if we have logged this event, used
+                                 * post-ProcessUtility to make sure we log */
     bool        statementLogged;    /* Track if we have logged the statement */
 } AuditEvent;
 
@@ -467,7 +467,7 @@ log_audit_event(AuditEventStackItem *stackItem)
     /* Classify the statement using log stmt level and the command tag */
     switch (stackItem->auditEvent.logStmtLevel)
     {
-            /* All mods go in WRITE class, execpt EXECUTE */
+            /* All mods go in WRITE class, except EXECUTE */
         case LOGSTMT_MOD:
             className = CLASS_WRITE;
             class = LOG_WRITE;
@@ -553,13 +553,14 @@ log_audit_event(AuditEventStackItem *stackItem)
             break;
     }
 
-    /*
+    /*----------
      * Only log the statement if:
      *
-     * 1. If object was selected for audit logging (granted) 2. The statement
-     * belongs to a class that is being logged
+     * 1. If object was selected for audit logging (granted), or
+     * 2. The statement belongs to a class that is being logged
      *
      * If neither of these is true, return.
+     *----------
      */
     if (!stackItem->auditEvent.granted && !(auditLogBitmap & class))
         return;
@@ -979,57 +980,39 @@ log_select_dml(Oid auditOid, List *rangeTabls)
 
         switch (rte->relkind)
         {
             case RELKIND_RELATION:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_TABLE;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_TABLE;
                 break;
 
             case RELKIND_INDEX:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_INDEX;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_INDEX;
                 break;
 
             case RELKIND_SEQUENCE:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_SEQUENCE;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_SEQUENCE;
                 break;
 
             case RELKIND_TOASTVALUE:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_TOASTVALUE;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_TOASTVALUE;
                 break;
 
             case RELKIND_VIEW:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_VIEW;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_VIEW;
                 break;
 
             case RELKIND_COMPOSITE_TYPE:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_COMPOSITE_TYPE;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_COMPOSITE_TYPE;
                 break;
 
             case RELKIND_FOREIGN_TABLE:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_FOREIGN_TABLE;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_FOREIGN_TABLE;
                 break;
 
             case RELKIND_MATVIEW:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_MATVIEW;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_MATVIEW;
                 break;
 
             default:
-                auditEventStack->auditEvent.objectType =
-                    OBJECT_TYPE_UNKNOWN;
-
+                auditEventStack->auditEvent.objectType = OBJECT_TYPE_UNKNOWN;
                 break;
         }
@@ -1043,9 +1026,7 @@ log_select_dml(Oid auditOid, List *rangeTabls)
         /* Perform object auditing only if the audit role is valid */
         if (auditOid != InvalidOid)
         {
-            AclMode     auditPerms =
-            (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) &
-            rte->requiredPerms;
+            AclMode     auditPerms = (ACL_SELECT | ACL_UPDATE | ACL_INSERT | ACL_DELETE) & rte->requiredPerms;
 
             /*
              * If any of the required permissions for the relation are granted
@@ -1166,7 +1147,6 @@ log_function_execute(Oid objectId)
     stackItem->auditEvent.commandTag = T_DoStmt;
     stackItem->auditEvent.command = COMMAND_EXECUTE;
     stackItem->auditEvent.objectType = OBJECT_TYPE_FUNCTION;
-
     stackItem->auditEvent.commandText = stackItem->next->auditEvent.commandText;
 
     log_audit_event(stackItem);
@@ -1459,8 +1439,7 @@ pg_audit_ddl_command_end(PG_FUNCTION_ARGS)
 
         /* Supply object name and type for audit event */
         auditEventStack->auditEvent.objectType =
-            SPI_getvalue(spiTuple, spiTupDesc, 1);
-
+            SPI_getvalue(spiTuple, spiTupDesc, 1);
         auditEventStack->auditEvent.objectName =
             SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1545,8 +1524,7 @@ pg_audit_sql_drop(PG_FUNCTION_ARGS)
         spiTuple = SPI_tuptable->vals[row];
 
         auditEventStack->auditEvent.objectType =
-            SPI_getvalue(spiTuple, spiTupDesc, 1);
-
+            SPI_getvalue(spiTuple, spiTupDesc, 1);
         auditEventStack->auditEvent.objectName =
             SPI_getvalue(spiTuple, spiTupDesc, 2);
 
@@ -1603,16 +1581,14 @@ check_pg_audit_log(char **newVal, void **extra, GucSource source)
 
     foreach(lt, flagRawList)
     {
+        char       *token = (char *) lfirst(lt);
         bool        subtract = false;
         int         class;
 
-        /* Retrieve a token */
-        char       *token = (char *) lfirst(lt);
-
         /* If token is preceded by -, then the token is subtractive */
-        if (strstr(token, "-") == token)
+        if (token[0] == '-')
         {
-            token = token + 1;
+            token++;
             subtract = true;
         }
 
diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c
index 563a9168f0..0a53900822 100644
--- a/src/backend/access/tablesample/bernoulli.c
+++ b/src/backend/access/tablesample/bernoulli.c
@@ -80,8 +80,7 @@ Datum
 tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
 {
     TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-    BernoulliSamplerData *sampler =
-    (BernoulliSamplerData *) tsdesc->tsmdata;
+    BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
     /*
      * Bernoulli sampling scans all blocks on the table and supports syncscan
@@ -117,10 +116,10 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
  * tuples have same probability of being returned the visible and invisible
  * tuples will be returned in same ratio as they have in the actual table.
  * This means that there is no skew towards either visible or invisible tuples
- * and the number returned visible tuples to from the executor node is the
- * fraction of visible tuples which was specified in input.
+ * and the number of visible tuples returned from the executor node should
+ * match the fraction of visible tuples which was specified by user.
  *
- * This is faster than doing the coinflip in the examinetuple because we don't
+ * This is faster than doing the coinflip in examinetuple because we don't
  * have to do visibility checks on uninteresting tuples.
  *
  * If we reach end of the block return InvalidOffsetNumber which tells
@@ -131,8 +130,7 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
 {
     TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
     OffsetNumber maxoffset = PG_GETARG_UINT16(2);
-    BernoulliSamplerData *sampler =
-    (BernoulliSamplerData *) tsdesc->tsmdata;
+    BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
     OffsetNumber tupoffset = sampler->lt;
     float4      probability = sampler->probability;
 
@@ -185,8 +183,7 @@ Datum
 tsm_bernoulli_reset(PG_FUNCTION_ARGS)
 {
     TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
-    BernoulliSamplerData *sampler =
-    (BernoulliSamplerData *) tsdesc->tsmdata;
+    BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata;
 
     sampler->blockno = InvalidBlockNumber;
     sampler->lt = InvalidOffsetNumber;
diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c
index 3398d02f85..44a24340f6 100644
--- a/src/backend/access/tablesample/tablesample.c
+++ b/src/backend/access/tablesample/tablesample.c
@@ -78,9 +78,12 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
     fcinfo.argnull[0] = false;
 
     /*
-     * Second arg for init function is always REPEATABLE When
-     * tablesample->repeatable is NULL then REPEATABLE clause was not
-     * specified. When specified, the expression cannot evaluate to NULL.
+     * Second arg for init function is always REPEATABLE.
+     *
+     * If tablesample->repeatable is NULL then REPEATABLE clause was not
+     * specified, and we insert a random value as default.
+     *
+     * When specified, the expression cannot evaluate to NULL.
      */
     if (tablesample->repeatable)
     {
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 7e15b797a7..3c611b938b 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -645,10 +645,12 @@ get_last_attnums(Node *node, ProjectionInfo *projInfo)
          * overall targetlist's econtext.  GroupingFunc arguments are never
          * evaluated at all.
          */
-        if (IsA(node, Aggref) ||IsA(node, GroupingFunc))
+        if (IsA(node, Aggref))
             return false;
         if (IsA(node, WindowFunc))
             return false;
+        if (IsA(node, GroupingFunc))
+            return false;
         return expression_tree_walker(node, get_last_attnums,
                                       (void *) projInfo);
     }
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index 31d74e9477..2bf48c54e3 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -1519,8 +1519,9 @@ agg_retrieve_direct(AggState *aggstate)
     /*
      * get state info from node
      *
-     * econtext is the per-output-tuple expression context tmpcontext is the
-     * per-input-tuple expression context
+     * econtext is the per-output-tuple expression context
+     *
+     * tmpcontext is the per-input-tuple expression context
      */
     econtext = aggstate->ss.ps.ps_ExprContext;
     tmpcontext = aggstate->tmpcontext;
@@ -1609,7 +1610,7 @@ agg_retrieve_direct(AggState *aggstate)
             else
                 nextSetSize = 0;
 
-            /*-
+            /*----------
              * If a subgroup for the current grouping set is present, project it.
              *
              * We have a new group if:
@@ -1624,6 +1625,7 @@ agg_retrieve_direct(AggState *aggstate)
              *    AND
              *    - the previous and pending rows differ on the grouping columns
              *      of the next grouping set
+             *----------
              */
             if (aggstate->input_done ||
                 (node->aggstrategy == AGG_SORTED &&
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 2a04924054..906cb46b65 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -527,8 +527,8 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
          * Buckets are simple pointers to hashjoin tuples, while tupsize
          * includes the pointer, hash code, and MinimalTupleData.  So buckets
          * should never really exceed 25% of work_mem (even for
-         * NTUP_PER_BUCKET=1); except maybe * for work_mem values that are not
-         * 2^N bytes, where we might get more * because of doubling. So let's
+         * NTUP_PER_BUCKET=1); except maybe for work_mem values that are not
+         * 2^N bytes, where we might get more because of doubling. So let's
          * look for 50% here.
          */
         Assert(bucket_bytes <= hash_table_bytes / 2);
@@ -691,9 +691,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
             if (batchno == curbatch)
             {
                 /* keep tuple in memory - copy it into the new chunk */
-                HashJoinTuple copyTuple =
-                (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
+                HashJoinTuple copyTuple;
 
+                copyTuple = (HashJoinTuple) dense_alloc(hashtable, hashTupleSize);
                 memcpy(copyTuple, hashTuple, hashTupleSize);
 
                 /* and add it back to the appropriate bucket */
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 920c2b77ff..8afde2b7d5 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -1918,10 +1918,10 @@ grouping_planner(PlannerInfo *root, double tuple_fraction)
                  * whether HAVING succeeds.  Furthermore, there cannot be any
                  * variables in either HAVING or the targetlist, so we
                  * actually do not need the FROM table at all!  We can just
-                 * throw away the plan-so-far and generate a Result node.
-                 * This is a sufficiently unusual corner case that it's not
-                 * worth contorting the structure of this routine to avoid
-                 * having to generate the plan in the first place.
+                 * throw away the plan-so-far and generate a Result node. This
+                 * is a sufficiently unusual corner case that it's not worth
+                 * contorting the structure of this routine to avoid having to
+                 * generate the plan in the first place.
                  */
                 result_plan = (Plan *) make_result(root,
                                                    tlist,
@@ -3157,22 +3157,23 @@ extract_rollup_sets(List *groupingSets)
     if (!lc1)
         return list_make1(groupingSets);
 
-    /*
+    /*----------
      * We don't strictly need to remove duplicate sets here, but if we don't,
      * they tend to become scattered through the result, which is a bit
-     * confusing (and irritating if we ever decide to optimize them out). So
-     * we remove them here and add them back after.
+     * confusing (and irritating if we ever decide to optimize them out).
+     * So we remove them here and add them back after.
      *
      * For each non-duplicate set, we fill in the following:
      *
-     * orig_sets[i] = list of the original set lists set_masks[i] = bitmapset
-     * for testing inclusion adjacency[i] = array [n, v1, v2, ... vn] of
-     * adjacency indices
+     * orig_sets[i] = list of the original set lists
+     * set_masks[i] = bitmapset for testing inclusion
+     * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
      *
      * chains[i] will be the result group this set is assigned to.
      *
-     * We index all of these from 1 rather than 0 because it is convenient to
-     * leave 0 free for the NIL node in the graph algorithm.
+     * We index all of these from 1 rather than 0 because it is convenient
+     * to leave 0 free for the NIL node in the graph algorithm.
+     *----------
      */
     orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
     set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c
index 5a2f696934..aaf0061164 100644
--- a/src/backend/rewrite/rowsecurity.c
+++ b/src/backend/rewrite/rowsecurity.c
@@ -596,8 +596,8 @@ process_policies(Query *root, List *policies, int rt_index, Expr **qual_eval,
         *qual_eval = (Expr *) linitial(quals);
 
     /*
-     * Similairly, if more than one WITH CHECK qual is returned, then they
-     * need to be combined together.
+     * Similarly, if more than one WITH CHECK qual is returned, then they need
+     * to be combined together.
      *
      * with_check_quals is allowed to be NIL here since this might not be the
      * resultRelation (see above).
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index c0959a0ee2..e68972221a 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -584,7 +584,7 @@ add_indent(StringInfo out, bool indent, int level)
  *
  * Given the datatype OID, return its JsonbTypeCategory, as well as the type's
  * output function OID.  If the returned category is JSONBTYPE_JSONCAST,
- * we return the OID of the relevant cast function instead. 
+ * we return the OID of the relevant cast function instead.
  */
 static void
 jsonb_categorize_type(Oid typoid,
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index c404ae5e4c..5517113151 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -106,8 +106,8 @@ typedef struct
     int         wrapColumn;     /* max line length, or -1 for no limit */
     int         indentLevel;    /* current indent level for prettyprint */
     bool        varprefix;      /* TRUE to print prefixes on Vars */
-    ParseExprKind special_exprkind;     /* set only for exprkinds needing */
-    /* special handling */
+    ParseExprKind special_exprkind;     /* set only for exprkinds needing
+                                         * special handling */
 } deparse_context;
 
 /*
-- 
2.40.0