<!--
-$Header: /cvsroot/pgsql/doc/src/sgml/ref/postgres-ref.sgml,v 1.23 2001/12/08 03:24:38 thomas Exp $
+$Header: /cvsroot/pgsql/doc/src/sgml/ref/postgres-ref.sgml,v 1.24 2002/03/02 21:39:16 momjian Exp $
PostgreSQL documentation
-->
<para>
The options <option>-A</option>, <option>-B</option>,
<option>-c</option>, <option>-d</option>, <option>-D</option>,
- <option>-F</option>, and <option>--name</> have the same meanings as
- for the <xref linkend="app-postmaster">.
+ <option>-F</option>, and <option>--name</> have the same meanings
+ as the <xref linkend="app-postmaster"> except that
+ <option>-d</option> <literal>0</> prevents the debugging level of
+  the postmaster from being propagated to the backend.
</para>
<variablelist>
<!--
-$Header: /cvsroot/pgsql/doc/src/sgml/ref/postmaster.sgml,v 1.26 2001/12/08 03:24:38 thomas Exp $
+$Header: /cvsroot/pgsql/doc/src/sgml/ref/postmaster.sgml,v 1.27 2002/03/02 21:39:16 momjian Exp $
PostgreSQL documentation
-->
<listitem>
<para>
Sets the debug level. The higher this value is set, the more
- debugging output is written to the server log. The default is
- 0, which means no debugging. Values up to 4 are useful; higher
- numbers produce no additional output.
+ debugging output is written to the server log. Values are from
+ 1 to 5.
</para>
</listitem>
</varlistentry>
<!--
-$Header: /cvsroot/pgsql/doc/src/sgml/runtime.sgml,v 1.104 2002/03/01 22:45:05 petere Exp $
+$Header: /cvsroot/pgsql/doc/src/sgml/runtime.sgml,v 1.105 2002/03/02 21:39:15 momjian Exp $
-->
<Chapter Id="runtime">
<para>
<variablelist>
+ <varlistentry>
+ <term><varname>SERVER_MIN_MESSAGES</varname> (<type>string</type>)</term>
+ <listitem>
+ <para>
+ This controls how much detail is written to the server logs. The
+ default is <literal>NOTICE</>. Valid values are <literal>DEBUG5</>,
+ <literal>DEBUG4</>, <literal>DEBUG3</>, <literal>DEBUG2</>,
+ <literal>DEBUG1</>, <literal>INFO</>, <literal>NOTICE</>,
+ <literal>ERROR</>, <literal>LOG</>, <literal>FATAL</>,
+ <literal>PANIC</>. Later values send less detail to the logs.
+ <literal>LOG</> has a different precedence here than in
+ <literal>CLIENT_MIN_MESSAGES</>.
+ </para>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry>
+ <term><varname>CLIENT_MIN_MESSAGES</varname> (<type>string</type>)</term>
+ <listitem>
+ <para>
+ This controls how much detail is written to the client. The
+ default is <literal>INFO</>. Valid values are
+ <literal>DEBUG5</>, <literal>DEBUG4</>, <literal>DEBUG3</>,
+ <literal>DEBUG2</>, <literal>DEBUG1</>, <literal>LOG</>,
+ <literal>INFO</>, <literal>NOTICE</>, <literal>ERROR</>,
+ <literal>FATAL</>, <literal>PANIC</>. Later values send less
+        information to the user. <literal>LOG</> has a different
+ precedence here than in <literal>SERVER_MIN_MESSAGES</>.
+ </para>
+ </listitem>
+ </varlistentry>
+
<varlistentry>
<term><varname>DEBUG_ASSERTIONS</varname> (<type>boolean</type>)</term>
<listitem>
</listitem>
</varlistentry>
- <varlistentry>
- <term><varname>DEBUG_LEVEL</varname> (<type>integer</type>)</term>
- <listitem>
- <para>
- The higher this value is set, the more
- <quote>debugging</quote> output of various sorts is generated
- in the server log during operation. This option is 0 by
- default, which means no debugging output. Values up to about 4
- currently make sense.
- </para>
- </listitem>
- </varlistentry>
-
<varlistentry>
<term><varname>DEBUG_PRINT_QUERY</varname> (<type>boolean</type>)</term>
<term><varname>DEBUG_PRINT_PARSE</varname> (<type>boolean</type>)</term>
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.88 2002/02/11 22:41:59 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.89 2002/03/02 21:39:16 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
gist_redo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "gist_redo: unimplemented");
+ elog(PANIC, "gist_redo: unimplemented");
}
void
gist_undo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "gist_undo: unimplemented");
+ elog(PANIC, "gist_undo: unimplemented");
}
void
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.53 2001/10/25 05:49:20 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.54 2002/03/02 21:39:16 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
void
hash_redo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "hash_redo: unimplemented");
+ elog(PANIC, "hash_redo: unimplemented");
}
void
hash_undo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "hash_undo: unimplemented");
+ elog(PANIC, "hash_undo: unimplemented");
}
void
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.129 2002/01/15 22:14:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.130 2002/03/02 21:39:17 momjian Exp $
*
*
* INTERFACE ROUTINES
#ifdef HEAPDEBUGALL
if (ItemPointerIsValid(tid))
{
- elog(DEBUG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
+ elog(LOG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
RelationGetRelationName(relation), tid, tid->ip_blkid,
tid->ip_posid, dir);
}
else
{
- elog(DEBUG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
+ elog(LOG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
RelationGetRelationName(relation), tid, dir);
}
- elog(DEBUG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
+ elog(LOG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
- elog(DEBUG, "heapgettup: relation(%c)=`%s', %p",
+ elog(LOG, "heapgettup: relation(%c)=`%s', %p",
relation->rd_rel->relkind, RelationGetRelationName(relation),
snapshot);
-#endif /* !defined(HEAPDEBUGALL) */
+#endif   /* !defined(HEAPDEBUGALL) */
if (!ItemPointerIsValid(tid))
{
#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
-elog(DEBUG, "heap_getnext([%s,nkeys=%d],backw=%d) called", \
+elog(LOG, "heap_getnext([%s,nkeys=%d],backw=%d) called", \
RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, backw)
#define HEAPDEBUG_2 \
- elog(DEBUG, "heap_getnext returning EOS")
+ elog(LOG, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
- elog(DEBUG, "heap_getnext returning tuple");
+ elog(LOG, "heap_getnext returning tuple");
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
buffer = XLogReadBuffer(false, reln, xlrec->block);
if (!BufferIsValid(buffer))
- elog(STOP, "heap_clean_redo: no block");
+ elog(PANIC, "heap_clean_redo: no block");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "heap_clean_redo: uninitialized page");
+ elog(PANIC, "heap_clean_redo: uninitialized page");
if (XLByteLE(lsn, PageGetLSN(page)))
{
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
- elog(STOP, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
+ elog(PANIC, "heap_delete_%sdo: no block", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "heap_delete_%sdo: uninitialized page", (redo) ? "re" : "un");
+ elog(PANIC, "heap_delete_%sdo: uninitialized page", (redo) ? "re" : "un");
if (redo)
{
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
- elog(STOP, "heap_delete_undo: bad page LSN");
+ elog(PANIC, "heap_delete_undo: bad page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp))
- elog(STOP, "heap_delete_%sdo: invalid lp", (redo) ? "re" : "un");
+ elog(PANIC, "heap_delete_%sdo: invalid lp", (redo) ? "re" : "un");
htup = (HeapTupleHeader) PageGetItem(page, lp);
return;
}
- elog(STOP, "heap_delete_undo: unimplemented");
+ elog(PANIC, "heap_delete_undo: unimplemented");
}
static void
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page) &&
(!redo || !(record->xl_info & XLOG_HEAP_INIT_PAGE)))
- elog(STOP, "heap_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
+ elog(PANIC, "heap_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
if (redo)
{
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
- elog(STOP, "heap_insert_redo: invalid max offset number");
+ elog(PANIC, "heap_insert_redo: invalid max offset number");
newlen = record->xl_len - SizeOfHeapInsert - SizeOfHeapHeader;
Assert(newlen <= MaxTupleSize);
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
- elog(STOP, "heap_insert_redo: failed to add tuple");
+ elog(PANIC, "heap_insert_redo: failed to add tuple");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
/* undo insert */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
- elog(STOP, "heap_insert_undo: bad page LSN");
+ elog(PANIC, "heap_insert_undo: bad page LSN");
- elog(STOP, "heap_insert_undo: unimplemented");
+ elog(PANIC, "heap_insert_undo: unimplemented");
}
/*
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
- elog(STOP, "heap_update_%sdo: no block", (redo) ? "re" : "un");
+ elog(PANIC, "heap_update_%sdo: no block", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "heap_update_%sdo: uninitialized old page", (redo) ? "re" : "un");
+ elog(PANIC, "heap_update_%sdo: uninitialized old page", (redo) ? "re" : "un");
if (redo)
{
}
else if (XLByteLT(PageGetLSN(page), lsn)) /* changes are not applied
* ?! */
- elog(STOP, "heap_update_undo: bad old tuple page LSN");
+ elog(PANIC, "heap_update_undo: bad old tuple page LSN");
offnum = ItemPointerGetOffsetNumber(&(xlrec->target.tid));
if (PageGetMaxOffsetNumber(page) >= offnum)
lp = PageGetItemId(page, offnum);
if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsUsed(lp))
- elog(STOP, "heap_update_%sdo: invalid lp", (redo) ? "re" : "un");
+ elog(PANIC, "heap_update_%sdo: invalid lp", (redo) ? "re" : "un");
htup = (HeapTupleHeader) PageGetItem(page, lp);
goto newt;
}
- elog(STOP, "heap_update_undo: unimplemented");
+ elog(PANIC, "heap_update_undo: unimplemented");
/* Deal with new tuple */
newsame:;
if (PageIsNew((PageHeader) page) &&
(!redo || !(record->xl_info & XLOG_HEAP_INIT_PAGE)))
- elog(STOP, "heap_update_%sdo: uninitialized page", (redo) ? "re" : "un");
+ elog(PANIC, "heap_update_%sdo: uninitialized page", (redo) ? "re" : "un");
if (redo)
{
offnum = ItemPointerGetOffsetNumber(&(xlrec->newtid));
if (PageGetMaxOffsetNumber(page) + 1 < offnum)
- elog(STOP, "heap_update_redo: invalid max offset number");
+ elog(PANIC, "heap_update_redo: invalid max offset number");
hsize = SizeOfHeapUpdate + SizeOfHeapHeader;
if (move)
offnum = PageAddItem(page, (Item) htup, newlen, offnum,
LP_USED | OverwritePageMode);
if (offnum == InvalidOffsetNumber)
- elog(STOP, "heap_update_redo: failed to add tuple");
+ elog(PANIC, "heap_update_redo: failed to add tuple");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID); /* prev sui */
UnlockAndWriteBuffer(buffer);
/* undo */
if (XLByteLT(PageGetLSN(page), lsn)) /* changes not applied?! */
- elog(STOP, "heap_update_undo: bad new tuple page LSN");
+ elog(PANIC, "heap_update_undo: bad new tuple page LSN");
- elog(STOP, "heap_update_undo: unimplemented");
+ elog(PANIC, "heap_update_undo: unimplemented");
}
HeapTupleHeader htup;
if (!RelationIsValid(reln))
- elog(STOP, "_heap_unlock_tuple: can't open relation");
+ elog(PANIC, "_heap_unlock_tuple: can't open relation");
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xltid->tid)));
if (!BufferIsValid(buffer))
- elog(STOP, "_heap_unlock_tuple: can't read buffer");
+ elog(PANIC, "_heap_unlock_tuple: can't read buffer");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "_heap_unlock_tuple: uninitialized page");
+ elog(PANIC, "_heap_unlock_tuple: uninitialized page");
offnum = ItemPointerGetOffsetNumber(&(xltid->tid));
if (offnum > PageGetMaxOffsetNumber(page))
- elog(STOP, "_heap_unlock_tuple: invalid itemid");
+ elog(PANIC, "_heap_unlock_tuple: invalid itemid");
lp = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(lp) || ItemIdDeleted(lp))
- elog(STOP, "_heap_unlock_tuple: unused/deleted tuple in rollback");
+ elog(PANIC, "_heap_unlock_tuple: unused/deleted tuple in rollback");
htup = (HeapTupleHeader) PageGetItem(page, lp);
if (!TransactionIdEquals(htup->t_xmax, GetCurrentTransactionId()) ||
htup->t_cmax != GetCurrentCommandId())
- elog(STOP, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
+ elog(PANIC, "_heap_unlock_tuple: invalid xmax/cmax in rollback");
htup->t_infomask &= ~HEAP_XMAX_UNLOGGED;
htup->t_infomask |= HEAP_XMAX_INVALID;
UnlockAndWriteBuffer(buffer);
else if (info == XLOG_HEAP_CLEAN)
heap_xlog_clean(true, lsn, record);
else
- elog(STOP, "heap_redo: unknown op code %u", info);
+ elog(PANIC, "heap_redo: unknown op code %u", info);
}
void
else if (info == XLOG_HEAP_CLEAN)
heap_xlog_clean(false, lsn, record);
else
- elog(STOP, "heap_undo: unknown op code %u", info);
+ elog(PANIC, "heap_undo: unknown op code %u", info);
}
static void
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.43 2001/10/25 05:49:21 momjian Exp $
+ * $Id: hio.c,v 1.44 2002/03/02 21:39:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tuple->t_len, InvalidOffsetNumber, LP_USED);
if (offnum == InvalidOffsetNumber)
- elog(STOP, "RelationPutHeapTuple: failed to add tuple");
+ elog(PANIC, "RelationPutHeapTuple: failed to add tuple");
/* Update tuple->t_self to the actual position where it was stored */
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
if (len > PageGetFreeSpace(pageHeader))
{
/* We should not get here given the test at the top */
- elog(STOP, "Tuple is too big: size %lu", (unsigned long) len);
+ elog(PANIC, "Tuple is too big: size %lu", (unsigned long) len);
}
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.88 2002/01/01 20:32:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.89 2002/03/02 21:39:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* If root page was splitted */
if (stack == (BTStack) NULL)
{
- elog(DEBUG, "btree: concurrent ROOT page split");
+ elog(LOG, "btree: concurrent ROOT page split");
/*
* If root page splitter failed to create new root page
item = (BTItem) PageGetItem(origpage, itemid);
if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree: failed to add hikey to the right sibling");
+ elog(PANIC, "btree: failed to add hikey to the right sibling");
rightoff = OffsetNumberNext(rightoff);
}
lhikey = item;
if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree: failed to add hikey to the left sibling");
+ elog(PANIC, "btree: failed to add hikey to the left sibling");
leftoff = OffsetNumberNext(leftoff);
/*
* the two items will go into positions P_HIKEY and P_FIRSTKEY.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree: failed to add leftkey to new root page");
+ elog(PANIC, "btree: failed to add leftkey to new root page");
pfree(new_item);
/*
* insert the right page pointer into the new root page.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree: failed to add rightkey to new root page");
+ elog(PANIC, "btree: failed to add rightkey to new root page");
pfree(new_item);
metad->btm_root = rootblknum;
if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree: failed to add item to the %s for %s",
+ elog(PANIC, "btree: failed to add item to the %s for %s",
where, RelationGetRelationName(rel));
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.87 2002/01/06 00:37:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.88 2002/03/02 21:39:18 momjian Exp $
*
*-------------------------------------------------------------------------
*/
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) from, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
- elog(STOP, "_bt_restore_page: can't add item to page");
+ elog(PANIC, "_bt_restore_page: can't add item to page");
from += itemsz;
}
}
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
- elog(STOP, "btree_delete_redo: block unfound");
+ elog(PANIC, "btree_delete_redo: block unfound");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "btree_delete_redo: uninitialized page");
+ elog(PANIC, "btree_delete_redo: uninitialized page");
if (XLByteLE(lsn, PageGetLSN(page)))
{
buffer = XLogReadBuffer(false, reln,
ItemPointerGetBlockNumber(&(xlrec->target.tid)));
if (!BufferIsValid(buffer))
- elog(STOP, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
+ elog(PANIC, "btree_insert_%sdo: block unfound", (redo) ? "re" : "un");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "btree_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
+ elog(PANIC, "btree_insert_%sdo: uninitialized page", (redo) ? "re" : "un");
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
if (redo)
record->xl_len - SizeOfBtreeInsert,
ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
- elog(STOP, "btree_insert_redo: failed to add item");
+ elog(PANIC, "btree_insert_redo: failed to add item");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
else
{
if (XLByteLT(PageGetLSN(page), lsn))
- elog(STOP, "btree_insert_undo: bad page LSN");
+ elog(PANIC, "btree_insert_undo: bad page LSN");
if (!P_ISLEAF(pageop))
{
return;
}
- elog(STOP, "btree_insert_undo: unimplemented");
+ elog(PANIC, "btree_insert_undo: unimplemented");
}
return;
BlockIdGetBlockNumber(&(xlrec->otherblk));
buffer = XLogReadBuffer(false, reln, blkno);
if (!BufferIsValid(buffer))
- elog(STOP, "btree_split_%s: lost left sibling", op);
+ elog(PANIC, "btree_split_%s: lost left sibling", op);
page = (Page) BufferGetPage(buffer);
if (redo)
_bt_pageinit(page, BufferGetPageSize(buffer));
else if (PageIsNew((PageHeader) page))
- elog(STOP, "btree_split_undo: uninitialized left sibling");
+ elog(PANIC, "btree_split_undo: uninitialized left sibling");
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
if (redo)
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
- elog(STOP, "btree_split_undo: bad left sibling LSN");
- elog(STOP, "btree_split_undo: unimplemented");
+ elog(PANIC, "btree_split_undo: bad left sibling LSN");
+ elog(PANIC, "btree_split_undo: unimplemented");
}
/* Right (new) sibling */
ItemPointerGetBlockNumber(&(xlrec->target.tid));
buffer = XLogReadBuffer((redo) ? true : false, reln, blkno);
if (!BufferIsValid(buffer))
- elog(STOP, "btree_split_%s: lost right sibling", op);
+ elog(PANIC, "btree_split_%s: lost right sibling", op);
page = (Page) BufferGetPage(buffer);
if (redo)
_bt_pageinit(page, BufferGetPageSize(buffer));
else if (PageIsNew((PageHeader) page))
- elog(STOP, "btree_split_undo: uninitialized right sibling");
+ elog(PANIC, "btree_split_undo: uninitialized right sibling");
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
if (redo)
/* undo */
{
if (XLByteLT(PageGetLSN(page), lsn))
- elog(STOP, "btree_split_undo: bad right sibling LSN");
- elog(STOP, "btree_split_undo: unimplemented");
+ elog(PANIC, "btree_split_undo: bad right sibling LSN");
+ elog(PANIC, "btree_split_undo: unimplemented");
}
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
buffer = XLogReadBuffer(false, reln, blkno);
if (!BufferIsValid(buffer))
- elog(STOP, "btree_split_redo: lost next right page");
+ elog(PANIC, "btree_split_redo: lost next right page");
page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
- elog(STOP, "btree_split_redo: uninitialized next right page");
+ elog(PANIC, "btree_split_redo: uninitialized next right page");
if (XLByteLE(lsn, PageGetLSN(page)))
{
return;
buffer = XLogReadBuffer(true, reln, BlockIdGetBlockNumber(&(xlrec->rootblk)));
if (!BufferIsValid(buffer))
- elog(STOP, "btree_newroot_redo: no root page");
+ elog(PANIC, "btree_newroot_redo: no root page");
metabuf = XLogReadBuffer(false, reln, BTREE_METAPAGE);
if (!BufferIsValid(buffer))
- elog(STOP, "btree_newroot_redo: no metapage");
+ elog(PANIC, "btree_newroot_redo: no metapage");
page = (Page) BufferGetPage(buffer);
_bt_pageinit(page, BufferGetPageSize(buffer));
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(true, lsn, record);
else
- elog(STOP, "btree_redo: unknown op code %u", info);
+ elog(PANIC, "btree_redo: unknown op code %u", info);
}
void
else if (info == XLOG_BTREE_NEWROOT)
btree_xlog_newroot(false, lsn, record);
else
- elog(STOP, "btree_undo: unknown op code %u", info);
+ elog(PANIC, "btree_undo: unknown op code %u", info);
}
static void
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.69 2002/01/15 22:14:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.70 2002/03/02 21:39:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
rtree_redo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "rtree_redo: unimplemented");
+ elog(PANIC, "rtree_redo: unimplemented");
}
void
rtree_undo(XLogRecPtr lsn, XLogRecord *record)
{
- elog(STOP, "rtree_undo: unimplemented");
+ elog(PANIC, "rtree_undo: unimplemented");
}
void
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.7 2001/10/28 06:25:42 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/clog.c,v 1.8 2002/03/02 21:39:19 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (fd < 0)
{
if (errno != ENOENT || !InRecovery)
- elog(STOP, "open of %s failed: %m", path);
- elog(DEBUG, "clog file %s doesn't exist, reading as zeroes", path);
+ elog(PANIC, "open of %s failed: %m", path);
+ elog(LOG, "clog file %s doesn't exist, reading as zeroes", path);
MemSet(ClogCtl->page_buffer[slotno], 0, CLOG_BLCKSZ);
return;
}
if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
- elog(STOP, "lseek of clog file %u, offset %u failed: %m",
+ elog(PANIC, "lseek of clog file %u, offset %u failed: %m",
segno, offset);
errno = 0;
if (read(fd, ClogCtl->page_buffer[slotno], CLOG_BLCKSZ) != CLOG_BLCKSZ)
- elog(STOP, "read of clog file %u, offset %u failed: %m",
+ elog(PANIC, "read of clog file %u, offset %u failed: %m",
segno, offset);
close(fd);
if (fd < 0)
{
if (errno != ENOENT)
- elog(STOP, "open of %s failed: %m", path);
+ elog(PANIC, "open of %s failed: %m", path);
fd = BasicOpenFile(path, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "creation of file %s failed: %m", path);
+ elog(PANIC, "creation of file %s failed: %m", path);
}
if (lseek(fd, (off_t) offset, SEEK_SET) < 0)
- elog(STOP, "lseek of clog file %u, offset %u failed: %m",
+ elog(PANIC, "lseek of clog file %u, offset %u failed: %m",
segno, offset);
errno = 0;
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(STOP, "write of clog file %u, offset %u failed: %m",
+ elog(PANIC, "write of clog file %u, offset %u failed: %m",
segno, offset);
}
cldir = opendir(ClogDir);
if (cldir == NULL)
- elog(STOP, "could not open transaction-commit log directory (%s): %m",
+ elog(PANIC, "could not open transaction-commit log directory (%s): %m",
ClogDir);
errno = 0;
errno = 0;
}
if (errno)
- elog(STOP, "could not read transaction-commit log directory (%s): %m",
+ elog(PANIC, "could not read transaction-commit log directory (%s): %m",
ClogDir);
closedir(cldir);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.115 2001/11/01 06:17:01 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.116 2002/03/02 21:39:19 momjian Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
/* SHOULD REMOVE FILES OF ALL FAILED-TO-BE-CREATED RELATIONS */
}
else
- elog(STOP, "xact_redo: unknown op code %u", info);
+ elog(PANIC, "xact_redo: unknown op code %u", info);
}
void
uint8 info = record->xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT) /* shouldn't be called by XLOG */
- elog(STOP, "xact_undo: can't undo committed xaction");
+ elog(PANIC, "xact_undo: can't undo committed xaction");
else if (info != XLOG_XACT_ABORT)
- elog(STOP, "xact_redo: unknown op code %u", info);
+ elog(PANIC, "xact_redo: unknown op code %u", info);
}
void
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)
- elog(STOP, "XactPushRollback: already installed");
+ elog(PANIC, "XactPushRollback: already installed");
#endif
_RollbackFunc = func;
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.87 2002/02/18 05:44:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.88 2002/03/02 21:39:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (info & XLR_INFO_MASK)
{
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
- elog(STOP, "XLogInsert: invalid info mask %02X",
+ elog(PANIC, "XLogInsert: invalid info mask %02X",
(info & XLR_INFO_MASK));
no_tran = true;
info &= ~XLR_INFO_MASK;
}
}
if (i >= XLR_MAX_BKP_BLOCKS)
- elog(STOP, "XLogInsert: can backup %d blocks at most",
+ elog(PANIC, "XLogInsert: can backup %d blocks at most",
XLR_MAX_BKP_BLOCKS);
}
/* Break out of loop when rdt points to last list item */
* also remove the check for xl_len == 0 in ReadRecord, below.
*/
if (len == 0 || len > MAXLOGRECSZ)
- elog(STOP, "XLogInsert: invalid record length %u", len);
+ elog(PANIC, "XLogInsert: invalid record length %u", len);
START_CRIT_SECTION();
strcat(buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(buf, record->xl_info, rdata->data);
}
- elog(DEBUG, "%s", buf);
+ elog(LOG, "%s", buf);
}
/* Record begin of record in appropriate places */
* AdvanceXLInsertBuffer.
*/
if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[Write->curridx]))
- elog(STOP, "XLogWrite: write request %X/%X is past end of log %X/%X",
+ elog(PANIC, "XLogWrite: write request %X/%X is past end of log %X/%X",
LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
XLogCtl->xlblocks[Write->curridx].xlogid,
XLogCtl->xlblocks[Write->curridx].xrecoff);
if (openLogFile >= 0)
{
if (close(openLogFile) != 0)
- elog(STOP, "close of log file %u, segment %u failed: %m",
+ elog(PANIC, "close of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
openLogFile = -1;
}
(uint32) CheckPointSegments))
{
if (XLOG_DEBUG)
- elog(DEBUG, "XLogWrite: time for a checkpoint, signaling postmaster");
+ elog(LOG, "XLogWrite: time for a checkpoint, signaling postmaster");
SendPostmasterSignal(PMSIGNAL_DO_CHECKPOINT);
}
}
{
openLogOff = (LogwrtResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
if (lseek(openLogFile, (off_t) openLogOff, SEEK_SET) < 0)
- elog(STOP, "lseek of log file %u, segment %u, offset %u failed: %m",
+ elog(PANIC, "lseek of log file %u, segment %u, offset %u failed: %m",
openLogId, openLogSeg, openLogOff);
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(STOP, "write of log file %u, segment %u, offset %u failed: %m",
+ elog(PANIC, "write of log file %u, segment %u, offset %u failed: %m",
openLogId, openLogSeg, openLogOff);
}
openLogOff += BLCKSZ;
!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile) != 0)
- elog(STOP, "close of log file %u, segment %u failed: %m",
+ elog(PANIC, "close of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
openLogFile = -1;
}
if (XLOG_DEBUG)
{
- elog(DEBUG, "XLogFlush%s%s: request %X/%X; write %X/%X; flush %X/%X\n",
+ elog(LOG, "XLogFlush%s%s: request %X/%X; write %X/%X; flush %X/%X\n",
(IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
(InRedo) ? "(redo)" : "",
record.xlogid, record.xrecoff,
* problem; most likely, the requested flush point is past end of XLOG.
* This has been seen to occur when a disk page has a corrupted LSN.
*
- * Formerly we treated this as a STOP condition, but that hurts the
+ * Formerly we treated this as a PANIC condition, but that hurts the
* system's robustness rather than helping it: we do not want to take
* down the whole system due to corruption on one data page. In
* particular, if the bad page is encountered again during recovery then
* The current approach is to ERROR under normal conditions, but only
* NOTICE during recovery, so that the system can be brought up even if
* there's a corrupt LSN. Note that for calls from xact.c, the ERROR
- * will be promoted to STOP since xact.c calls this routine inside a
+ * will be promoted to PANIC since xact.c calls this routine inside a
* critical section. However, calls from bufmgr.c are not within
* critical sections and so we will not force a restart for a bad LSN
* on a data page.
if (fd < 0)
{
if (errno != ENOENT)
- elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
+ elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
path, log, seg);
}
else
fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "creation of file %s failed: %m", tmppath);
+ elog(PANIC, "creation of file %s failed: %m", tmppath);
/*
* Zero-fill the file. We have to do this the hard way to ensure that
/* if write didn't set errno, assume problem is no disk space */
errno = save_errno ? save_errno : ENOSPC;
- elog(STOP, "ZeroFill failed to write %s: %m", tmppath);
+ elog(PANIC, "ZeroFill failed to write %s: %m", tmppath);
}
}
if (pg_fsync(fd) != 0)
- elog(STOP, "fsync of file %s failed: %m", tmppath);
+ elog(PANIC, "fsync of file %s failed: %m", tmppath);
close(fd);
fd = BasicOpenFile(path, O_RDWR | PG_BINARY | XLOG_SYNC_BIT,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
+ elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
path, log, seg);
return (fd);
*/
#ifndef __BEOS__
if (link(tmppath, path) < 0)
- elog(STOP, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
+ elog(PANIC, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
tmppath, path, log, seg);
unlink(tmppath);
#else
if (rename(tmppath, path) < 0)
- elog(STOP, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
+ elog(PANIC, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
tmppath, path, log, seg);
#endif
path, log, seg);
return (fd);
}
- elog(STOP, "open of %s (log file %u, segment %u) failed: %m",
+ elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
path, log, seg);
}
xldir = opendir(XLogDir);
if (xldir == NULL)
- elog(STOP, "could not open transaction log directory (%s): %m",
+ elog(PANIC, "could not open transaction log directory (%s): %m",
XLogDir);
sprintf(lastoff, "%08X%08X", log, seg);
errno = 0;
}
if (errno)
- elog(STOP, "could not read transaction log directory (%s): %m",
+ elog(PANIC, "could not read transaction log directory (%s): %m",
XLogDir);
closedir(xldir);
}
* If RecPtr is not NULL, try to read a record at that position. Otherwise
* try to read a record just after the last one previously read.
*
- * If no valid record is available, returns NULL, or fails if emode is STOP.
- * (emode must be either STOP or LOG.)
+ * If no valid record is available, returns NULL, or fails if emode is PANIC.
+ * (emode must be either PANIC or LOG.)
*
* buffer is a workspace at least _INTL_MAXLOGRECSZ bytes long. It is needed
* to reassemble a record that crosses block boundaries. Note that on
tmpRecPtr.xrecoff += SizeOfXLogPHD;
}
else if (!XRecOffIsValid(RecPtr->xrecoff))
- elog(STOP, "ReadRecord: invalid record offset at %X/%X",
+ elog(PANIC, "ReadRecord: invalid record offset at %X/%X",
RecPtr->xlogid, RecPtr->xrecoff);
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
#ifdef USE_LOCALE
localeptr = setlocale(LC_COLLATE, NULL);
if (!localeptr)
- elog(STOP, "invalid LC_COLLATE setting");
+ elog(PANIC, "invalid LC_COLLATE setting");
StrNCpy(ControlFile->lc_collate, localeptr, LOCALE_NAME_BUFLEN);
localeptr = setlocale(LC_CTYPE, NULL);
if (!localeptr)
- elog(STOP, "invalid LC_CTYPE setting");
+ elog(PANIC, "invalid LC_CTYPE setting");
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/*
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
- elog(STOP, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
+ elog(PANIC, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
memset(buffer, 0, BLCKSZ);
memcpy(buffer, ControlFile, sizeof(ControlFileData));
fd = BasicOpenFile(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "WriteControlFile: could not create control file (%s): %m",
+ elog(PANIC, "WriteControlFile: could not create control file (%s): %m",
ControlFilePath);
errno = 0;
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(STOP, "WriteControlFile: write to control file failed: %m");
+ elog(PANIC, "WriteControlFile: write to control file failed: %m");
}
if (pg_fsync(fd) != 0)
- elog(STOP, "WriteControlFile: fsync of control file failed: %m");
+ elog(PANIC, "WriteControlFile: fsync of control file failed: %m");
close(fd);
}
*/
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "could not open control file (%s): %m", ControlFilePath);
+ elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
- elog(STOP, "read from control file failed: %m");
+ elog(PANIC, "read from control file failed: %m");
close(fd);
* more enlightening than complaining about wrong CRC.
*/
if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with PG_CONTROL_VERSION %d,\n"
"\tbut the server was compiled with PG_CONTROL_VERSION %d.\n"
"\tIt looks like you need to initdb.",
FIN_CRC64(crc);
if (!EQ_CRC64(crc, ControlFile->crc))
- elog(STOP, "invalid checksum in control file");
+ elog(PANIC, "invalid checksum in control file");
/*
* Do compatibility checking immediately. We do this here for 2
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
"\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->catalog_version_no, CATALOG_VERSION_NO);
if (ControlFile->blcksz != BLCKSZ)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with BLCKSZ %d,\n"
"\tbut the backend was compiled with BLCKSZ %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->blcksz, BLCKSZ);
if (ControlFile->relseg_size != RELSEG_SIZE)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with RELSEG_SIZE %d,\n"
"\tbut the backend was compiled with RELSEG_SIZE %d.\n"
"\tIt looks like you need to initdb.",
ControlFile->relseg_size, RELSEG_SIZE);
#ifdef USE_LOCALE
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with LC_COLLATE '%s',\n"
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
ControlFile->lc_collate);
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with LC_CTYPE '%s',\n"
"\twhich is not recognized by setlocale().\n"
"\tIt looks like you need to initdb.",
#else /* not USE_LOCALE */
if (strcmp(ControlFile->lc_collate, "C") != 0 ||
strcmp(ControlFile->lc_ctype, "C") != 0)
- elog(STOP,
+ elog(PANIC,
"The database cluster was initialized with LC_COLLATE '%s' and\n"
"\tLC_CTYPE '%s', but the server was compiled without locale support.\n"
"\tIt looks like you need to initdb or recompile.",
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(STOP, "could not open control file (%s): %m", ControlFilePath);
+ elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
errno = 0;
if (write(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(STOP, "write to control file failed: %m");
+ elog(PANIC, "write to control file failed: %m");
}
if (pg_fsync(fd) != 0)
- elog(STOP, "fsync of control file failed: %m");
+ elog(PANIC, "fsync of control file failed: %m");
close(fd);
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(STOP, "BootStrapXLOG failed to write log file: %m");
+ elog(PANIC, "BootStrapXLOG failed to write log file: %m");
}
if (pg_fsync(openLogFile) != 0)
- elog(STOP, "BootStrapXLOG failed to fsync log file: %m");
+ elog(PANIC, "BootStrapXLOG failed to fsync log file: %m");
close(openLogFile);
openLogFile = -1;
ControlFile->state < DB_SHUTDOWNED ||
ControlFile->state > DB_IN_PRODUCTION ||
!XRecOffIsValid(ControlFile->checkPoint.xrecoff))
- elog(STOP, "control file context is broken");
+ elog(PANIC, "control file context is broken");
if (ControlFile->state == DB_SHUTDOWNED)
elog(LOG, "database system was shut down at %s",
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
- elog(STOP, "unable to locate a valid checkpoint record");
+ elog(PANIC, "unable to locate a valid checkpoint record");
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
elog(LOG, "next transaction id: %u; next oid: %u",
checkPoint.nextXid, checkPoint.nextOid);
if (!TransactionIdIsNormal(checkPoint.nextXid))
- elog(STOP, "invalid next transaction id");
+ elog(PANIC, "invalid next transaction id");
ShmemVariableCache->nextXid = checkPoint.nextXid;
ShmemVariableCache->nextOid = checkPoint.nextOid;
XLogCtl->RedoRecPtr = checkPoint.redo;
if (XLByteLT(RecPtr, checkPoint.redo))
- elog(STOP, "invalid redo in checkpoint record");
+ elog(PANIC, "invalid redo in checkpoint record");
if (checkPoint.undo.xrecoff == 0)
checkPoint.undo = RecPtr;
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
- elog(STOP, "invalid redo/undo record in shutdown checkpoint");
+ elog(PANIC, "invalid redo/undo record in shutdown checkpoint");
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
/* Is REDO required ? */
if (XLByteLT(checkPoint.redo, RecPtr))
- record = ReadRecord(&(checkPoint.redo), STOP, buffer);
+ record = ReadRecord(&(checkPoint.redo), PANIC, buffer);
else
{
/* read past CheckPoint record */
strcat(buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(buf,
record->xl_info, XLogRecGetData(record));
- elog(DEBUG, "%s", buf);
+ elog(LOG, "%s", buf);
}
if (record->xl_info & XLR_BKP_BLOCK_MASK)
* Init xlog buffer cache using the block containing the last valid
* record from the previous incarnation.
*/
- record = ReadRecord(&LastRec, STOP, buffer);
+ record = ReadRecord(&LastRec, PANIC, buffer);
EndOfLog = EndRecPtr;
XLByteToPrevSeg(EndOfLog, openLogId, openLogSeg);
openLogFile = XLogFileOpen(openLogId, openLogSeg, false);
RecPtr.xlogid, RecPtr.xrecoff);
do
{
- record = ReadRecord(&RecPtr, STOP, buffer);
+ record = ReadRecord(&RecPtr, PANIC, buffer);
if (TransactionIdIsValid(record->xl_xid) &&
!TransactionIdDidCommit(record->xl_xid))
RmgrTable[record->xl_rmid].rm_undo(EndRecPtr, record);
checkPoint.undo = GetUndoRecPtr();
if (shutdown && checkPoint.undo.xrecoff != 0)
- elog(STOP, "active transaction while database system is shutting down");
+ elog(PANIC, "active transaction while database system is shutting down");
#endif
/*
* recptr = end of actual checkpoint record.
*/
if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
- elog(STOP, "concurrent transaction log activity while database system is shutting down");
+ elog(PANIC, "concurrent transaction log activity while database system is shutting down");
/*
* Select point at which we can truncate the log, which we base on the
if (openLogFile >= 0)
{
if (pg_fsync(openLogFile) != 0)
- elog(STOP, "fsync of log file %u, segment %u failed: %m",
+ elog(PANIC, "fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
- elog(STOP, "close of log file %u, segment %u failed: %m",
+ elog(PANIC, "close of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
openLogFile = -1;
}
{
case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
- elog(STOP, "fsync of log file %u, segment %u failed: %m",
+ elog(PANIC, "fsync of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
- elog(STOP, "fdatasync of log file %u, segment %u failed: %m",
+ elog(PANIC, "fdatasync of log file %u, segment %u failed: %m",
openLogId, openLogSeg);
break;
#endif
/* write synced it already */
break;
default:
- elog(STOP, "bogus wal_sync_method %d", sync_method);
+ elog(PANIC, "bogus wal_sync_method %d", sync_method);
break;
}
}
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.21 2001/10/25 05:49:22 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlogutils.c,v 1.22 2002/03/02 21:39:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
hentry = (XLogRelCacheEntry *) hash_search(_xlrelcache,
(void *) &(rdesc->reldata.rd_node), HASH_REMOVE, NULL);
if (hentry == NULL)
- elog(STOP, "_xl_remove_hash_entry: file was not found in cache");
+ elog(PANIC, "_xl_remove_hash_entry: file was not found in cache");
if (rdesc->reldata.rd_fd >= 0)
smgrclose(DEFAULT_SMGR, &(rdesc->reldata));
hash_search(_xlrelcache, (void *) &rnode, HASH_ENTER, &found);
if (hentry == NULL)
- elog(STOP, "XLogOpenRelation: out of memory for cache");
+ elog(PANIC, "XLogOpenRelation: out of memory for cache");
if (found)
- elog(STOP, "XLogOpenRelation: file found on insert into cache");
+ elog(PANIC, "XLogOpenRelation: file found on insert into cache");
hentry->rdesc = res;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootparse.y,v 1.39 2001/09/29 04:02:22 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootparse.y,v 1.40 2002/03/02 21:39:20 momjian Exp $
*
*-------------------------------------------------------------------------
*/
do_start()
{
StartTransactionCommand();
- if (DebugMode)
- elog(DEBUG, "start transaction");
+ elog(DEBUG3, "start transaction");
}
do_end()
{
CommitTransactionCommand();
- if (DebugMode)
- elog(DEBUG, "commit transaction");
+ elog(DEBUG3, "commit transaction");
if (isatty(0))
{
printf("bootstrap> ");
{
do_start();
numattr = 0;
- if (DebugMode)
- {
- if ($2)
- elog(DEBUG, "creating bootstrap relation %s...",
- LexIDStr($4));
- else
- elog(DEBUG, "creating relation %s...",
- LexIDStr($4));
- }
+ if ($2)
+ elog(DEBUG3, "creating bootstrap relation %s...",
+ LexIDStr($4));
+ else
+ elog(DEBUG3, "creating relation %s...",
+ LexIDStr($4));
}
boot_typelist
{
if (reldesc)
{
- elog(DEBUG, "create bootstrap: warning, open relation exists, closing first");
+ elog(DEBUG3, "create bootstrap: warning, open relation exists, closing first");
closerel(NULL);
}
reldesc = heap_create(LexIDStr($4), tupdesc,
false, true, true);
reldesc->rd_rel->relhasoids = ! ($3);
- if (DebugMode)
- elog(DEBUG, "bootstrap relation created");
+ elog(DEBUG3, "bootstrap relation created");
}
else
{
! ($3),
false,
true);
- if (DebugMode)
- elog(DEBUG, "relation created with oid %u", id);
+ elog(DEBUG3, "relation created with oid %u", id);
}
do_end();
}
INSERT_TUPLE optoideq
{
do_start();
- if (DebugMode)
- {
- if ($2)
- elog(DEBUG, "inserting row with oid %u...", $2);
- else
- elog(DEBUG, "inserting row...");
- }
+ if ($2)
+ elog(DEBUG3, "inserting row with oid %u...", $2);
+ else
+ elog(DEBUG3, "inserting row...");
num_columns_read = 0;
}
LPAREN boot_tuplelist RPAREN
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.121 2002/02/23 01:31:34 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/bootstrap/bootstrap.c,v 1.122 2002/03/02 21:39:21 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Datum values[MAXATTR]; /* corresponding attribute values */
int numattr; /* number of attributes for cur. rel */
-int DebugMode;
-
static MemoryContext nogc = NULL; /* special no-gc mem context */
extern int optind;
{
fprintf(stderr,
gettext("Usage:\n"
- " postgres -boot [-d] [-D datadir] [-F] [-o file] [-x num] dbname\n"
- " -d debug mode\n"
+ " postgres -boot [-d level] [-D datadir] [-F] [-o file] [-x num] dbname\n"
+ " -d 1-5 debug mode\n"
" -D datadir data directory\n"
" -F turn off fsync\n"
" -o file send debug output to file\n"
potential_DataDir = optarg;
break;
case 'd':
- DebugMode = true; /* print out debugging info while
- * parsing */
+ {
+ /* Turn on debugging for the postmaster. */
+ char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
+ sprintf(debugstr, "debug%s", optarg);
+ /* Source is PGC_S_ARGV here; the backend will reset these settings itself */
+ SetConfigOption("server_min_messages", debugstr, PGC_POSTMASTER, PGC_S_ARGV);
+ SetConfigOption("client_min_messages", debugstr, PGC_POSTMASTER, PGC_S_ARGV);
+ pfree(debugstr);
break;
+ }
+ break;
case 'F':
SetConfigOption("fsync", "false", PGC_POSTMASTER, PGC_S_ARGV);
break;
proc_exit(0); /* done */
default:
- elog(STOP, "Unsupported XLOG op %d", xlogop);
+ elog(PANIC, "Unsupported XLOG op %d", xlogop);
proc_exit(0);
}
if (reldesc != NULL)
closerel(NULL);
- if (DebugMode)
- elog(DEBUG, "open relation %s, attrsize %d", relname ? relname : "(null)",
- (int) ATTRIBUTE_TUPLE_SIZE);
+ elog(DEBUG3, "open relation %s, attrsize %d", relname ? relname : "(null)",
+ (int) ATTRIBUTE_TUPLE_SIZE);
reldesc = heap_openr(relname, NoLock);
numattr = reldesc->rd_rel->relnatts;
else
attrtypes[i]->attisset = false;
- if (DebugMode)
{
Form_pg_attribute at = attrtypes[i];
- elog(DEBUG, "create attribute %d name %s len %d num %d type %u",
+ elog(DEBUG3, "create attribute %d name %s len %d num %d type %u",
i, NameStr(at->attname), at->attlen, at->attnum,
- at->atttypid
- );
+ at->atttypid);
}
}
}
elog(ERROR, "no open relation to close");
else
{
- if (DebugMode)
- elog(DEBUG, "close relation %s", relname ? relname : "(null)");
+ elog(DEBUG3, "close relation %s", relname ? relname : "(null)");
heap_close(reldesc, NoLock);
reldesc = (Relation) NULL;
}
if (reldesc != NULL)
{
- elog(DEBUG, "warning: no open relations allowed with 'create' command");
+ elog(LOG, "warning: no open relations allowed with 'create' command");
closerel(relname);
}
{
attrtypes[attnum]->atttypid = Ap->am_oid;
namestrcpy(&attrtypes[attnum]->attname, name);
- if (DebugMode)
- elog(DEBUG, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
+ elog(DEBUG3, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Ap->am_typ.typlen;
attrtypes[attnum]->attbyval = Ap->am_typ.typbyval;
{
attrtypes[attnum]->atttypid = Procid[typeoid].oid;
namestrcpy(&attrtypes[attnum]->attname, name);
- if (DebugMode)
- elog(DEBUG, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
+ elog(DEBUG3, "column %s %s", NameStr(attrtypes[attnum]->attname), type);
attrtypes[attnum]->attnum = 1 + attnum; /* fillatt */
attlen = attrtypes[attnum]->attlen = Procid[typeoid].len;
attrtypes[attnum]->attstorage = 'p';
TupleDesc tupDesc;
int i;
- if (DebugMode)
- elog(DEBUG, "inserting row oid %u, %d columns", objectid, numattr);
+ elog(DEBUG3, "inserting row oid %u, %d columns", objectid, numattr);
tupDesc = CreateTupleDesc(numattr, attrtypes);
tuple = heap_formtuple(tupDesc, values, Blanks);
tuple->t_data->t_oid = objectid;
heap_insert(reldesc, tuple);
heap_freetuple(tuple);
- if (DebugMode)
- elog(DEBUG, "row inserted");
+ elog(DEBUG3, "row inserted");
/*
* Reset blanks for next tuple
AssertArg(i >= 0 || i < MAXATTR);
- if (DebugMode)
- elog(DEBUG, "inserting column %d value '%s'", i, value);
+ elog(DEBUG3, "inserting column %d value '%s'", i, value);
if (Typ != (struct typmap **) NULL)
{
struct typmap *ap;
- if (DebugMode)
- elog(DEBUG, "Typ != NULL");
+ elog(DEBUG3, "Typ != NULL");
app = Typ;
while (*app && (*app)->am_oid != reldesc->rd_att->attrs[i]->atttypid)
++app;
values[i],
ObjectIdGetDatum(ap->am_typ.typelem),
Int32GetDatum(-1)));
- if (DebugMode)
- elog(DEBUG, " -> %s", prt);
+ elog(DEBUG3, " -> %s", prt);
pfree(prt);
}
else
}
if (typeindex >= n_types)
elog(ERROR, "type oid %u not found", attrtypes[i]->atttypid);
- if (DebugMode)
- elog(DEBUG, "Typ == NULL, typeindex = %u", typeindex);
+ elog(DEBUG3, "Typ == NULL, typeindex = %u", typeindex);
values[i] = OidFunctionCall3(Procid[typeindex].inproc,
CStringGetDatum(value),
ObjectIdGetDatum(Procid[typeindex].elem),
values[i],
ObjectIdGetDatum(Procid[typeindex].elem),
Int32GetDatum(-1)));
- if (DebugMode)
- elog(DEBUG, " -> %s", prt);
+ elog(DEBUG3, " -> %s", prt);
pfree(prt);
}
- if (DebugMode)
- elog(DEBUG, "inserted");
+ elog(DEBUG3, "inserted");
}
/* ----------------
void
InsertOneNull(int i)
{
- if (DebugMode)
- elog(DEBUG, "inserting column %d NULL", i);
+ elog(DEBUG3, "inserting column %d NULL", i);
Assert(i >= 0 || i < MAXATTR);
values[i] = PointerGetDatum(NULL);
Blanks[i] = 'n';
if (strncmp(type, Procid[i].name, NAMEDATALEN) == 0)
return i;
}
- if (DebugMode)
- elog(DEBUG, "external type: %s", type);
+ elog(DEBUG3, "external type: %s", type);
rel = heap_openr(TypeRelationName, NoLock);
scan = heap_beginscan(rel, 0, SnapshotNow, 0, (ScanKey) NULL);
i = 0;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.54 2002/02/18 23:11:07 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/catalog/aclchk.c,v 1.55 2002/03/02 21:39:22 momjian Exp $
*
* NOTES
* See acl.h.
int i;
AclItem *aip;
- elog(DEBUG, "acl size = %d, # acls = %d",
+ elog(LOG, "acl size = %d, # acls = %d",
ACL_SIZE(acl), ACL_NUM(acl));
aip = ACL_DAT(acl);
for (i = 0; i < ACL_NUM(acl); ++i)
- elog(DEBUG, " acl[%d]: %s", i,
+ elog(LOG, " acl[%d]: %s", i,
DatumGetCString(DirectFunctionCall1(aclitemout,
PointerGetDatum(aip + i))));
}
Oid argoids[FUNC_MAX_ARGS];
int i;
int16 argcount;
-
+
MemSet(argoids, 0, FUNC_MAX_ARGS * sizeof(Oid));
argcount = length(arguments);
if (argcount > FUNC_MAX_ARGS)
{
TypeName *t = (TypeName *) lfirst(arguments);
char *typnam = TypeNameToInternalName(t);
-
+
arguments = lnext(arguments);
-
+
if (strcmp(typnam, "opaque") == 0)
argoids[i] = InvalidOid;
else
func_error(NULL, name, argcount, argoids, NULL);
return oid;
-}
+}
static void
*/
if (!acl)
{
- elog(DEBUG, "aclcheck: null ACL, returning OK");
+ elog(LOG, "aclcheck: null ACL, returning OK");
return ACLCHECK_OK;
}
*/
if (num < 1)
{
- elog(DEBUG, "aclcheck: zero-length ACL, returning OK");
+ elog(LOG, "aclcheck: zero-length ACL, returning OK");
return ACLCHECK_OK;
}
if (aidat->ai_mode & mode)
{
#ifdef ACLDEBUG
- elog(DEBUG, "aclcheck: using world=%d", aidat->ai_mode);
+ elog(LOG, "aclcheck: using world=%d", aidat->ai_mode);
#endif
return ACLCHECK_OK;
}
if (aip->ai_id == id)
{
#ifdef ACLDEBUG
- elog(DEBUG, "aclcheck: found user %u/%d",
+ elog(LOG, "aclcheck: found user %u/%d",
aip->ai_id, aip->ai_mode);
#endif
if (aip->ai_mode & mode)
if (in_group(id, aip->ai_id))
{
#ifdef ACLDEBUG
- elog(DEBUG, "aclcheck: found group %u/%d",
+ elog(LOG, "aclcheck: found group %u/%d",
aip->ai_id, aip->ai_mode);
#endif
return ACLCHECK_OK;
if (aip->ai_id == id)
{
#ifdef ACLDEBUG
- elog(DEBUG, "aclcheck: found group %u/%d",
+ elog(LOG, "aclcheck: found group %u/%d",
aip->ai_id, aip->ai_mode);
#endif
if (aip->ai_mode & mode)
!((Form_pg_shadow) GETSTRUCT(tuple))->usecatupd)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_aclcheck: catalog update to \"%s\": permission denied",
+ elog(LOG, "pg_aclcheck: catalog update to \"%s\": permission denied",
relname);
#endif
ReleaseSysCache(tuple);
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_aclcheck: \"%s\" is superuser",
+ elog(LOG, "pg_aclcheck: \"%s\" is superuser",
usename);
#endif
ReleaseSysCache(tuple);
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
+ elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
usename);
#endif
ReleaseSysCache(tuple);
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
+ elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
usename);
#endif
ReleaseSysCache(tuple);
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_ownercheck: user \"%s\" is superuser",
+ elog(LOG, "pg_ownercheck: user \"%s\" is superuser",
usename);
#endif
ReleaseSysCache(tuple);
if (((Form_pg_shadow) GETSTRUCT(tuple))->usesuper)
{
#ifdef ACLDEBUG
- elog(DEBUG, "pg_aggr_ownercheck: user \"%s\" is superuser",
+ elog(LOG, "pg_aggr_ownercheck: user \"%s\" is superuser",
usename);
#endif
ReleaseSysCache(tuple);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.26 2002/02/18 16:04:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/analyze.c,v 1.27 2002/03/02 21:39:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define swapInt(a,b) do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
#define swapDatum(a,b) do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
-
-static int MESSAGE_LEVEL;
+static int elevel = -1;
/* context information for compare_scalars() */
static FmgrInfo *datumCmpFn;
HeapTuple tuple;
if (vacstmt->verbose)
- MESSAGE_LEVEL = NOTICE;
+ elevel = INFO;
else
- MESSAGE_LEVEL = DEBUG;
-
+ elevel = DEBUG1;
+
/*
* Begin a transaction for analyzing this relation.
*
return;
}
- elog(MESSAGE_LEVEL, "Analyzing %s", RelationGetRelationName(onerel));
+ elog(elevel, "Analyzing %s", RelationGetRelationName(onerel));
/*
* Determine which columns to analyze
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.81 2001/10/25 05:49:23 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/async.c,v 1.82 2002/03/02 21:39:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Async_Notify(char *relname)
{
if (Trace_notify)
- elog(DEBUG, "Async_Notify: %s", relname);
+ elog(LOG, "Async_Notify: %s", relname);
/* no point in making duplicate entries in the list ... */
if (!AsyncExistsPendingNotify(relname))
bool alreadyListener = false;
if (Trace_notify)
- elog(DEBUG, "Async_Listen: %s", relname);
+ elog(LOG, "Async_Listen: %s", relname);
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
}
if (Trace_notify)
- elog(DEBUG, "Async_Unlisten %s", relname);
+ elog(LOG, "Async_Unlisten %s", relname);
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
ScanKeyData key[1];
if (Trace_notify)
- elog(DEBUG, "Async_UnlistenAll");
+ elog(LOG, "Async_UnlistenAll");
lRel = heap_openr(ListenerRelationName, AccessExclusiveLock);
tdesc = RelationGetDescr(lRel);
}
if (Trace_notify)
- elog(DEBUG, "AtCommit_Notify");
+ elog(LOG, "AtCommit_Notify");
/* preset data to update notify column to MyProcPid */
nulls[0] = nulls[1] = nulls[2] = ' ';
*/
if (Trace_notify)
- elog(DEBUG, "AtCommit_Notify: notifying self");
+ elog(LOG, "AtCommit_Notify: notifying self");
NotifyMyFrontEnd(relname, listenerPID);
}
else
{
if (Trace_notify)
- elog(DEBUG, "AtCommit_Notify: notifying pid %d",
+ elog(LOG, "AtCommit_Notify: notifying pid %d",
listenerPID);
/*
ClearPendingNotifies();
if (Trace_notify)
- elog(DEBUG, "AtCommit_Notify: done");
+ elog(LOG, "AtCommit_Notify: done");
}
/*
{
/* Here, it is finally safe to do stuff. */
if (Trace_notify)
- elog(DEBUG, "Async_NotifyHandler: perform async notify");
+ elog(LOG, "Async_NotifyHandler: perform async notify");
ProcessIncomingNotify();
if (Trace_notify)
- elog(DEBUG, "Async_NotifyHandler: done");
+ elog(LOG, "Async_NotifyHandler: done");
}
}
}
if (notifyInterruptOccurred)
{
if (Trace_notify)
- elog(DEBUG, "EnableNotifyInterrupt: perform async notify");
+ elog(LOG, "EnableNotifyInterrupt: perform async notify");
ProcessIncomingNotify();
if (Trace_notify)
- elog(DEBUG, "EnableNotifyInterrupt: done");
+ elog(LOG, "EnableNotifyInterrupt: done");
}
}
}
nulls[Natts_pg_listener];
if (Trace_notify)
- elog(DEBUG, "ProcessIncomingNotify");
+ elog(LOG, "ProcessIncomingNotify");
set_ps_display("async_notify");
/* Notify the frontend */
if (Trace_notify)
- elog(DEBUG, "ProcessIncomingNotify: received %s from %d",
+ elog(LOG, "ProcessIncomingNotify: received %s from %d",
relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
set_ps_display("idle");
if (Trace_notify)
- elog(DEBUG, "ProcessIncomingNotify: done");
+ elog(LOG, "ProcessIncomingNotify: done");
}
/*
*/
}
else
- elog(NOTICE, "NOTIFY for %s", relname);
+ elog(INFO, "NOTIFY for %s", relname);
}
/* Does pendingNotifies include the given relname? */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.156 2002/02/27 19:34:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/command.c,v 1.157 2002/03/02 21:39:22 momjian Exp $
*
* NOTES
* The PerformAddAttribute() code, like most of the relation
constrName);
/* Otherwise if more than one constraint deleted, notify */
else if (deleted > 1)
- elog(NOTICE, "Multiple constraints dropped");
-
+ elog(INFO, "Multiple constraints dropped");
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.81 2001/10/25 05:49:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/Attic/creatinh.c,v 1.82 2002/03/02 21:39:22 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Yes, try to merge the two column definitions. They must
* have the same type and typmod.
*/
- elog(NOTICE, "CREATE TABLE: merging multiple inherited definitions of attribute \"%s\"",
+ elog(INFO, "CREATE TABLE: merging multiple inherited definitions of attribute \"%s\"",
attributeName);
def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
if (strcmp(def->typename->name, attributeType) != 0 ||
* Yes, try to merge the two column definitions. They must
* have the same type and typmod.
*/
- elog(NOTICE, "CREATE TABLE: merging attribute \"%s\" with inherited definition",
+ elog(INFO, "CREATE TABLE: merging attribute \"%s\" with inherited definition",
attributeName);
def = (ColumnDef *) nth(exist_attno - 1, inhSchema);
if (strcmp(def->typename->name, attributeType) != 0 ||
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.65 2002/02/18 23:11:10 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/define.c,v 1.66 2002/03/02 21:39:23 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
else if (strcasecmp(defel->defname, "precedence") == 0)
{
/* NOT IMPLEMENTED (never worked in v4.2) */
- elog(NOTICE, "CREATE OPERATOR: precedence not implemented");
+ elog(INFO, "CREATE OPERATOR: precedence not implemented");
}
else if (strcasecmp(defel->defname, "associativity") == 0)
{
/* NOT IMPLEMENTED (never worked in v4.2) */
- elog(NOTICE, "CREATE OPERATOR: associativity not implemented");
+ elog(INFO, "CREATE OPERATOR: associativity not implemented");
}
else if (strcasecmp(defel->defname, "commutator") == 0)
commutatorName = defGetString(defel);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.68 2002/02/26 22:47:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/explain.c,v 1.69 2002/03/02 21:39:23 momjian Exp $
*
*/
/* rewriter will not cope with utility statements */
if (query->commandType == CMD_UTILITY)
{
- elog(NOTICE, "Utility statements have no plan structure");
+ elog(INFO, "Utility statements have no plan structure");
return;
}
/* In the case of an INSTEAD NOTHING, tell at least that */
if (rewritten == NIL)
{
- elog(NOTICE, "Query rewrites to nothing");
+ elog(INFO, "Query rewrites to nothing");
return;
}
if (query->commandType == CMD_UTILITY)
{
if (query->utilityStmt && IsA(query->utilityStmt, NotifyStmt))
- elog(NOTICE, "QUERY PLAN:\n\nNOTIFY\n");
+ elog(INFO, "QUERY PLAN:\n\nNOTIFY\n");
else
- elog(NOTICE, "QUERY PLAN:\n\nUTILITY\n");
+ elog(INFO, "QUERY PLAN:\n\nUTILITY\n");
return;
}
s = nodeToString(plan);
if (s)
{
- elog(NOTICE, "QUERY DUMP:\n\n%s", s);
+ elog(INFO, "QUERY DUMP:\n\n%s", s);
pfree(s);
}
}
if (analyze)
appendStringInfo(str, "Total runtime: %.2f msec\n",
1000.0 * totaltime);
- elog(NOTICE, "QUERY PLAN:\n\n%s", str->data);
+ elog(INFO, "QUERY PLAN:\n\n%s", str->data);
pfree(str->data);
pfree(str);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.68 2002/01/11 18:16:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/sequence.c,v 1.69 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
sequence_magic *sm;
if (info != XLOG_SEQ_LOG)
- elog(STOP, "seq_redo: unknown op code %u", info);
+ elog(PANIC, "seq_redo: unknown op code %u", info);
reln = XLogOpenRelation(true, RM_SEQ_ID, xlrec->node);
if (!RelationIsValid(reln))
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
- elog(STOP, "seq_redo: can't read block of %u/%u",
+ elog(PANIC, "seq_redo: can't read block of %u/%u",
xlrec->node.tblNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
itemsz = MAXALIGN(itemsz);
if (PageAddItem(page, (Item) item, itemsz,
FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
- elog(STOP, "seq_redo: failed to add item to page");
+ elog(PANIC, "seq_redo: failed to add item to page");
PageSetLSN(page, lsn);
PageSetSUI(page, ThisStartUpID);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.102 2002/02/19 20:11:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/trigger.c,v 1.103 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
elog(ERROR, "DropTrigger: there is no trigger %s on relation %s",
stmt->trigname, stmt->relname);
if (tgfound > 1)
- elog(NOTICE, "DropTrigger: found (and deleted) %d triggers %s on relation %s",
+ elog(INFO, "DropTrigger: found (and deleted) %d triggers %s on relation %s",
tgfound, stmt->trigname, stmt->relname);
/*
stmt.relname = pstrdup(RelationGetRelationName(refrel));
heap_close(refrel, NoLock);
- elog(NOTICE, "DROP TABLE implicitly drops referential integrity trigger from table \"%s\"", stmt.relname);
+ elog(INFO, "DROP TABLE implicitly drops referential integrity trigger from table \"%s\"", stmt.relname);
DropTrigger(&stmt);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.91 2002/03/01 22:45:08 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/user.c,v 1.92 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* changes to the flat password file cannot be rolled back */
if (IsTransactionBlock() && password)
- elog(NOTICE, "ALTER USER: password changes cannot be rolled back");
+ elog(INFO, "ALTER USER: password changes cannot be rolled back");
/*
* Scan the pg_shadow relation to be certain the user exists. Note we
elog(ERROR, "DROP USER: permission denied");
if (IsTransactionBlock())
- elog(NOTICE, "DROP USER cannot be rolled back completely");
+ elog(INFO, "DROP USER cannot be rolled back completely");
/*
* Scan the pg_shadow relation to find the usesysid of the user to be
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.214 2002/02/19 20:11:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.215 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static MemoryContext vac_context = NULL;
-static int MESSAGE_LEVEL; /* message level */
+static int elevel = -1;
static TransactionId OldestXmin;
static TransactionId FreezeLimit;
pgstat_vacuum_tabstat();
if (vacstmt->verbose)
- MESSAGE_LEVEL = NOTICE;
+ elevel = INFO;
else
- MESSAGE_LEVEL = DEBUG;
-
+ elevel = DEBUG1;
+
/*
* Create special memory context for cross-transaction storage.
*
vac_init_rusage(&ru0);
relname = RelationGetRelationName(onerel);
- elog(MESSAGE_LEVEL, "--Relation %s--", relname);
+ elog(elevel, "--Relation %s--", relname);
empty_pages = new_pages = changed_pages = empty_end_pages = 0;
num_tuples = tups_vacuumed = nkeep = nunused = 0;
pfree(vtlinks);
}
- elog(MESSAGE_LEVEL, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
+ elog(elevel, "Pages %u: Changed %u, reaped %u, Empty %u, New %u; \
Tup %.0f: Vac %.0f, Keep/VTL %.0f/%u, UnUsed %.0f, MinLen %lu, MaxLen %lu; \
Re-using: Free/Avail. Space %.0f/%.0f; EndEmpty/Avail. Pages %u/%u.\n\t%s",
nblocks, changed_pages, vacuum_pages->num_pages, empty_pages,
InvalidOffsetNumber, LP_USED);
if (newoff == InvalidOffsetNumber)
{
- elog(STOP, "moving chain: failed to add item with len = %lu to page %u",
+ elog(PANIC, "moving chain: failed to add item with len = %lu to page %u",
(unsigned long) tuple_len, destvacpage->blkno);
}
newitemid = PageGetItemId(ToPage, newoff);
InvalidOffsetNumber, LP_USED);
if (newoff == InvalidOffsetNumber)
{
- elog(STOP, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
+ elog(PANIC, "failed to add item with len = %lu to page %u (free space %lu, nusd %u, noff %u)",
(unsigned long) tuple_len,
cur_page->blkno, (unsigned long) cur_page->free,
cur_page->offsets_used, cur_page->offsets_free);
}
Assert(num_moved == checked_moved);
- elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
+ elog(elevel, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u.\n\t%s",
RelationGetRelationName(onerel),
nblocks, blkno, num_moved,
vac_show_rusage(&ru0));
/* truncate relation if there are some empty end-pages */
if (vacuum_pages->empty_end_pages > 0)
{
- elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u.",
+ elog(elevel, "Rel %s: Pages: %u --> %u.",
RelationGetRelationName(onerel),
vacrelstats->rel_pages, relblocks);
relblocks = smgrtruncate(DEFAULT_SMGR, onerel, relblocks);
stats->num_pages, stats->num_index_tuples,
false);
- elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
+ elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
RelationGetRelationName(indrel),
stats->num_pages, stats->num_index_tuples,
vac_show_rusage(&ru0));
stats->num_pages, stats->num_index_tuples,
false);
- elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
+ elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
RelationGetRelationName(indrel), stats->num_pages,
stats->num_index_tuples - keep_tuples, stats->tuples_removed,
vac_show_rusage(&ru0));
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.11 2002/01/06 00:37:44 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuumlazy.c,v 1.12 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} LVRelStats;
-static int MESSAGE_LEVEL; /* message level */
+static int elevel = -1;
static TransactionId OldestXmin;
static TransactionId FreezeLimit;
bool hasindex;
BlockNumber possibly_freeable;
- /* initialize */
if (vacstmt->verbose)
- MESSAGE_LEVEL = NOTICE;
+ elevel = INFO;
else
- MESSAGE_LEVEL = DEBUG;
-
+ elevel = DEBUG1;
+
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
vac_init_rusage(&ru0);
relname = RelationGetRelationName(onerel);
- elog(MESSAGE_LEVEL, "--Relation %s--", relname);
+ elog(elevel, "--Relation %s--", relname);
empty_pages = changed_pages = 0;
num_tuples = tups_vacuumed = nkeep = nunused = 0;
lazy_scan_index(Irel[i], vacrelstats);
}
- elog(MESSAGE_LEVEL, "Pages %u: Changed %u, Empty %u; \
+ elog(elevel, "Pages %u: Changed %u, Empty %u; \
Tup %.0f: Vac %.0f, Keep %.0f, UnUsed %.0f.\n\tTotal %s",
nblocks, changed_pages, empty_pages,
num_tuples, tups_vacuumed, nkeep, nunused,
npages++;
}
- elog(MESSAGE_LEVEL, "Removed %d tuples in %d pages.\n\t%s",
- tupindex, npages,
+ elog(elevel, "Removed %d tuples in %d pages.\n\t%s", tupindex, npages,
vac_show_rusage(&ru0));
}
stats->num_pages, stats->num_index_tuples,
false);
- elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
+ elog(elevel, "Index %s: Pages %u; Tuples %.0f.\n\t%s",
RelationGetRelationName(indrel),
stats->num_pages, stats->num_index_tuples,
vac_show_rusage(&ru0));
stats->num_pages, stats->num_index_tuples,
false);
- elog(MESSAGE_LEVEL, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
+ elog(elevel, "Index %s: Pages %u; Tuples %.0f: Deleted %.0f.\n\t%s",
RelationGetRelationName(indrel), stats->num_pages,
stats->num_index_tuples, stats->tuples_removed,
vac_show_rusage(&ru0));
* We keep the exclusive lock until commit (perhaps not necessary)?
*/
- elog(MESSAGE_LEVEL, "Truncated %u --> %u pages.\n\t%s",
- old_rel_pages, new_rel_pages,
- vac_show_rusage(&ru0));
+ elog(elevel, "Truncated %u --> %u pages.\n\t%s", old_rel_pages,
+ new_rel_pages, vac_show_rusage(&ru0));
}
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.58 2002/02/23 01:31:35 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/variable.c,v 1.59 2002/03/02 21:39:23 momjian Exp $
*
*-------------------------------------------------------------------------
*/
strcat(buf, ((EuroDates) ? "European" : "US (NonEuropean)"));
strcat(buf, " conventions");
- elog(NOTICE, buf, NULL);
+ elog(INFO, buf, NULL);
return TRUE;
}
tzn = getenv("TZ");
if (tzn != NULL)
- elog(NOTICE, "Time zone is '%s'", tzn);
+ elog(INFO, "Time zone is '%s'", tzn);
else
- elog(NOTICE, "Time zone is unset");
+ elog(INFO, "Time zone is unset");
return TRUE;
} /* show_timezone() */
{
if (XactIsoLevel == XACT_SERIALIZABLE)
- elog(NOTICE, "TRANSACTION ISOLATION LEVEL is SERIALIZABLE");
+ elog(INFO, "TRANSACTION ISOLATION LEVEL is SERIALIZABLE");
else
- elog(NOTICE, "TRANSACTION ISOLATION LEVEL is READ COMMITTED");
+ elog(INFO, "TRANSACTION ISOLATION LEVEL is READ COMMITTED");
return TRUE;
}
static bool
show_random_seed(void)
{
- elog(NOTICE, "Seed for random number generator is unavailable");
+ elog(INFO, "Seed for random number generator is unavailable");
return (TRUE);
}
static bool
show_client_encoding(void)
{
- elog(NOTICE, "Current client encoding is '%s'",
+ elog(INFO, "Current client encoding is '%s'",
pg_get_client_encoding_name());
return TRUE;
}
static bool
parse_server_encoding(List *args)
{
- elog(NOTICE, "SET SERVER_ENCODING is not supported");
+ elog(INFO, "SET SERVER_ENCODING is not supported");
return TRUE;
}
static bool
show_server_encoding(void)
{
- elog(NOTICE, "Current server encoding is '%s'", GetDatabaseEncodingName());
+ elog(INFO, "Current server encoding is '%s'", GetDatabaseEncodingName());
return TRUE;
}
static bool
reset_server_encoding(void)
{
- elog(NOTICE, "RESET SERVER_ENCODING is not supported");
+ elog(INFO, "RESET SERVER_ENCODING is not supported");
return TRUE;
}
{
const char *val = GetConfigOption(name);
- elog(NOTICE, "%s is %s", name, val);
+ elog(INFO, "%s is %s", name, val);
}
}
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: execAmi.c,v 1.61 2002/02/19 20:11:13 tgl Exp $
+ * $Id: execAmi.c,v 1.62 2002/03/02 21:39:24 momjian Exp $
*
*-------------------------------------------------------------------------
*/
default:
/* don't make hard error unless caller asks to restore... */
- elog(DEBUG, "ExecMarkPos: node type %d not supported",
+ elog(LOG, "ExecMarkPos: node type %d not supported",
nodeTag(node));
break;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.150 2002/02/27 19:34:48 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/execMain.c,v 1.151 2002/03/02 21:39:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
break;
default:
- elog(DEBUG, "ExecutePlan: unknown operation in queryDesc");
+ elog(LOG, "ExecutePlan: unknown operation in queryDesc");
result = NULL;
break;
}
* Copyright (c) 2001, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/instrument.c,v 1.2 2001/10/25 05:49:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/instrument.c,v 1.3 2002/03/02 21:39:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return;
if (instr->starttime.tv_sec != 0 || instr->starttime.tv_usec != 0)
- elog(DEBUG, "InstrStartTimer called twice in a row");
+ elog(LOG, "InstrStartTimer called twice in a row");
else
gettimeofday(&instr->starttime, NULL);
}
if (instr->starttime.tv_sec == 0 && instr->starttime.tv_usec == 0)
{
- elog(DEBUG, "InstrStopNode without start");
+ elog(LOG, "InstrStopNode without start");
return;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.78 2001/10/25 05:49:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAgg.c,v 1.79 2002/03/02 21:39:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* So, just make a debug note, and force numaggs positive so that
* palloc()s below don't choke.
*/
- elog(DEBUG, "ExecInitAgg: could not find any aggregate functions");
+ elog(LOG, "ExecInitAgg: could not find any aggregate functions");
numaggs = 1;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.43 2001/10/25 05:49:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/executor/nodeAppend.c,v 1.44 2002/03/02 21:39:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
subnode = (Plan *) nth(whichplan, appendplans);
if (subnode == NULL)
- elog(DEBUG, "ExecProcAppend: subnode is NULL");
+ elog(LOG, "ExecProcAppend: subnode is NULL");
/*
* get a tuple from the subplan
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.75 2002/02/25 20:07:02 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/auth.c,v 1.76 2002/03/02 21:39:25 momjian Exp $
*
*-------------------------------------------------------------------------
*/
initStringInfo(&buf);
pq_getstr(&buf);
- if (DebugLvl > 5)
- fprintf(stderr, "received PAM packet with len=%d, pw=%s\n",
- len, buf.data);
+ elog(DEBUG5, "received PAM packet with len=%d, pw=%s\n", len, buf.data);
if (strlen(buf.data) == 0)
{
return STATUS_EOF;
}
- if (DebugLvl > 5) /* this is probably a BAD idea... */
- fprintf(stderr, "received password packet with len=%d, pw=%s\n",
- len, buf.data);
+ elog(DEBUG5, "received password packet with len=%d, pw=%s\n",
+ len, buf.data);
result = checkPassword(port, port->user, buf.data);
pfree(buf.data);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.42 2001/11/05 17:46:25 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/libpq/crypt.c,v 1.43 2002/03/02 21:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pwdfile = AllocateFile(filename, "r");
if (pwdfile == NULL && errno != ENOENT)
- elog(DEBUG, "could not open %s: %m", filename);
+ elog(LOG, "could not open %s: %m", filename);
pfree(filename);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: pqcomm.c,v 1.126 2001/12/04 20:57:22 tgl Exp $
+ * $Id: pqcomm.c,v 1.127 2002/03/02 21:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Careful: an elog() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the postmaster
- * log. elog(DEBUG) is presently safe.
+ * log. elog(LOG) is presently safe.
*/
- elog(DEBUG, "pq_recvbuf: recv() failed: %m");
+ elog(LOG, "pq_recvbuf: recv() failed: %m");
return EOF;
}
if (r == 0)
{
/* as above, only write to postmaster log */
- elog(DEBUG, "pq_recvbuf: unexpected EOF on client connection");
+ elog(LOG, "pq_recvbuf: unexpected EOF on client connection");
return EOF;
}
/* r contains number of bytes read, so just incr length */
* Careful: an elog() that tries to write to the client
* would cause recursion to here, leading to stack overflow
* and core dump! This message must go *only* to the postmaster
- * log. elog(DEBUG) is presently safe.
+ * log. elog(LOG) is presently safe.
*
* If a client disconnects while we're in the midst of output,
* we might write quite a bit of data before we get to a safe
if (errno != last_reported_send_errno)
{
last_reported_send_errno = errno;
- elog(DEBUG, "pq_flush: send() failed: %m");
+ elog(LOG, "pq_flush: send() failed: %m");
}
/*
if (res < 0)
{
/* can log to postmaster log only */
- elog(DEBUG, "pq_eof: recv() failed: %m");
+ elog(LOG, "pq_eof: recv() failed: %m");
return EOF;
}
if (res == 0)
* geqo_erx.c
* edge recombination crossover [ER]
*
-* $Id: geqo_erx.c,v 1.16 2001/10/25 05:49:31 momjian Exp $
+* $Id: geqo_erx.c,v 1.17 2002/03/02 21:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
}
- elog(DEBUG, "edge_failure(1): no edge found via random decision and total_edges == 4");
+ elog(LOG, "edge_failure(1): no edge found via random decision and total_edges == 4");
}
else
}
}
- elog(DEBUG, "edge_failure(2): no edge found via random decision and remainig edges");
+ elog(LOG, "edge_failure(2): no edge found via random decision and remainig edges");
}
/*
if (edge_table[i].unused_edges >= 0)
return (Gene) i;
- elog(DEBUG, "edge_failure(3): no edge found via looking for the last ununsed point");
+ elog(LOG, "edge_failure(3): no edge found via looking for the last ununsed point");
}
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: geqo_main.c,v 1.29 2001/10/25 05:49:31 momjian Exp $
+ * $Id: geqo_main.c,v 1.30 2002/03/02 21:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
daddy = alloc_chromo(pool->string_length);
#if defined (ERX)
- elog(DEBUG, "geqo_main: using edge recombination crossover [ERX]");
+ elog(LOG, "geqo_main: using edge recombination crossover [ERX]");
/* allocate edge table memory */
edge_table = alloc_edge_table(pool->string_length);
#elif defined(PMX)
- elog(DEBUG, "geqo_main: using partially matched crossover [PMX]");
+ elog(LOG, "geqo_main: using partially matched crossover [PMX]");
/* allocate chromosome kid memory */
kid = alloc_chromo(pool->string_length);
#elif defined(CX)
- elog(DEBUG, "geqo_main: using cycle crossover [CX]");
+ elog(LOG, "geqo_main: using cycle crossover [CX]");
/* allocate city table memory */
kid = alloc_chromo(pool->string_length);
city_table = alloc_city_table(pool->string_length);
#elif defined(PX)
- elog(DEBUG, "geqo_main: using position crossover [PX]");
+ elog(LOG, "geqo_main: using position crossover [PX]");
/* allocate city table memory */
kid = alloc_chromo(pool->string_length);
city_table = alloc_city_table(pool->string_length);
#elif defined(OX1)
- elog(DEBUG, "geqo_main: using order crossover [OX1]");
+ elog(LOG, "geqo_main: using order crossover [OX1]");
/* allocate city table memory */
kid = alloc_chromo(pool->string_length);
city_table = alloc_city_table(pool->string_length);
#elif defined(OX2)
- elog(DEBUG, "geqo_main: using order crossover [OX2]");
+ elog(LOG, "geqo_main: using order crossover [OX2]");
/* allocate city table memory */
kid = alloc_chromo(pool->string_length);
city_table = alloc_city_table(pool->string_length);
#if defined(ERX) && defined(GEQO_DEBUG)
if (edge_failures != 0)
- elog(DEBUG, "[GEQO] failures: %d, average: %d",
+ elog(LOG, "[GEQO] failures: %d, average: %d",
edge_failures, (int) generation / edge_failures);
else
- elog(DEBUG, "[GEQO] No edge failures detected.");
+ elog(LOG, "[GEQO] No edge failures detected.");
#endif
#if defined(CX) && defined(GEQO_DEBUG)
if (mutations != 0)
- elog(DEBUG, "[GEQO] mutations: %d, generations: %d", mutations, generation);
+ elog(LOG, "[GEQO] mutations: %d, generations: %d", mutations, generation);
else
- elog(DEBUG, "[GEQO] No mutations processed.");
+ elog(LOG, "[GEQO] No mutations processed.");
#endif
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.112 2001/10/25 05:49:32 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/optimizer/path/indxpath.c,v 1.113 2002/03/02 21:39:26 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!HeapTupleIsValid(tuple))
{
/* this probably shouldn't fail? */
- elog(DEBUG, "pred_test_simple_clause: unknown test_op");
+ elog(LOG, "pred_test_simple_clause: unknown test_op");
return false;
}
aform = (Form_pg_amop) GETSTRUCT(tuple);
if (isNull)
{
- elog(DEBUG, "pred_test_simple_clause: null test result");
+ elog(LOG, "pred_test_simple_clause: null test result");
return false;
}
return DatumGetBool(test_result);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.215 2002/02/26 22:47:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/analyze.c,v 1.216 2002/03/02 21:39:27 momjian Exp $
*
*-------------------------------------------------------------------------
*/
sequence->istemp = cxt->istemp;
sequence->options = NIL;
- elog(NOTICE, "%s will create implicit sequence '%s' for SERIAL column '%s.%s'",
+ elog(INFO, "%s will create implicit sequence '%s' for SERIAL column '%s.%s'",
cxt->stmtType, sequence->seqname, cxt->relname, column->colname);
cxt->blist = lappend(cxt->blist, sequence);
elog(ERROR, "%s: failed to make implicit index name",
cxt->stmtType);
- elog(NOTICE, "%s / %s%s will create implicit index '%s' for table '%s'",
+ elog(INFO, "%s / %s%s will create implicit index '%s' for table '%s'",
cxt->stmtType,
(strcmp(cxt->stmtType, "ALTER TABLE") == 0) ? "ADD " : "",
(index->primary ? "PRIMARY KEY" : "UNIQUE"),
if (cxt->fkconstraints == NIL)
return;
- elog(NOTICE, "%s will create implicit trigger(s) for FOREIGN KEY check(s)",
+ elog(INFO, "%s will create implicit trigger(s) for FOREIGN KEY check(s)",
cxt->stmtType);
foreach(fkclist, cxt->fkconstraints)
elog(ERROR, "unsupported expression in %%TYPE");
v = (Var *) n;
tyn = typeidTypeName(v->vartype);
- elog(NOTICE, "%s.%s%%TYPE converted to %s", tn->name, tn->attrname, tyn);
+ elog(INFO, "%s.%s%%TYPE converted to %s", tn->name, tn->attrname, tyn);
tn->name = tyn;
tn->typmod = v->vartypmod;
tn->attrname = NULL;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/gram.y,v 2.282 2002/03/01 22:45:12 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/gram.y,v 2.283 2002/03/02 21:39:27 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
n->constraints = $3;
if ($4 != NULL)
- elog(NOTICE,"CREATE TABLE / COLLATE %s not yet implemented"
+ elog(INFO,"CREATE TABLE / COLLATE %s not yet implemented"
"; clause ignored", $4);
$$ = (Node *)n;
| RELATIVE { $$ = RELATIVE; }
| ABSOLUTE
{
- elog(NOTICE,"FETCH / ABSOLUTE not supported, using RELATIVE");
+ elog(INFO,"FETCH / ABSOLUTE not supported, using RELATIVE");
$$ = RELATIVE;
}
;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.60 2001/11/05 17:46:26 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/parser/parse_relation.c,v 1.61 2002/03/02 21:39:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
}
if (foundInFromCl)
- elog(NOTICE, "Adding missing FROM-clause entry%s for table \"%s\"",
+ elog(INFO, "Adding missing FROM-clause entry%s for table \"%s\"",
pstate->parentParseState != NULL ? " in subquery" : "",
refname);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.268 2002/03/02 20:46:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/postmaster/postmaster.c,v 1.269 2002/03/02 21:39:28 momjian Exp $
*
* NOTES
*
potential_DataDir = optarg;
break;
case 'd':
-
- /*
- * Turn on debugging for the postmaster and the backend
- * servers descended from it.
- */
- SetConfigOption("debug_level", optarg, PGC_POSTMASTER, PGC_S_ARGV);
+ {
+ /* Turn on debugging for the postmaster. */
+ char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
+ sprintf(debugstr, "debug%s", optarg);
+ /* We use PGC_S_SESSION because we will reset in backend */
+ SetConfigOption("server_min_messages", debugstr, PGC_POSTMASTER, PGC_S_SESSION);
+ pfree(debugstr);
break;
+ }
case 'F':
SetConfigOption("fsync", "false", PGC_POSTMASTER, PGC_S_ARGV);
break;
#endif
/* For debugging: display postmaster environment */
- if (DebugLvl > 2)
{
extern char **environ;
char **p;
- fprintf(stderr, "%s: PostmasterMain: initial environ dump:\n",
- progname);
- fprintf(stderr, "-----------------------------------------\n");
+ elog(DEBUG2, "%s: PostmasterMain: initial environ dump:", progname);
+ elog(DEBUG2, "-----------------------------------------");
for (p = environ; *p; ++p)
- fprintf(stderr, "\t%s\n", *p);
- fprintf(stderr, "-----------------------------------------\n");
+ elog(DEBUG2, "\t%s", *p);
+ elog(DEBUG2, "-----------------------------------------");
}
/*
PG_SETMASK(&BlockSig);
if (errno == EINTR || errno == EWOULDBLOCK)
continue;
- elog(DEBUG, "ServerLoop: select failed: %m");
+ elog(LOG, "ServerLoop: select failed: %m");
return STATUS_ERROR;
}
if (pq_getbytes((char *) &len, 4) == EOF)
{
- elog(DEBUG, "incomplete startup packet");
+ elog(LOG, "incomplete startup packet");
return STATUS_ERROR;
}
if (pq_getbytes(buf, len) == EOF)
{
- elog(DEBUG, "incomplete startup packet");
+ elog(LOG, "incomplete startup packet");
return STATUS_ERROR;
}
#endif
if (send(port->sock, &SSLok, 1, 0) != 1)
{
- elog(DEBUG, "failed to send SSL negotiation response: %s",
+ elog(LOG, "failed to send SSL negotiation response: %s",
strerror(errno));
return STATUS_ERROR; /* close the connection */
}
!SSL_set_fd(port->ssl, port->sock) ||
SSL_accept(port->ssl) <= 0)
{
- elog(DEBUG, "failed to initialize SSL connection: %s (%m)",
+ elog(LOG, "failed to initialize SSL connection: %s (%m)",
SSLerrmessage());
return STATUS_ERROR;
}
if (backendPID == CheckPointPID)
{
- if (DebugLvl)
- elog(DEBUG, "processCancelRequest: CheckPointPID in cancel request for process %d", backendPID);
+ elog(DEBUG1, "processCancelRequest: CheckPointPID in cancel request for process %d", backendPID);
return;
}
if (bp->cancel_key == cancelAuthCode)
{
/* Found a match; signal that backend to cancel current op */
- if (DebugLvl)
- elog(DEBUG, "processing cancel request: sending SIGINT to process %d",
- backendPID);
+ elog(DEBUG1, "processing cancel request: sending SIGINT to process %d",
+ backendPID);
kill(bp->pid, SIGINT);
}
else
- {
/* Right PID, wrong key: no way, Jose */
- if (DebugLvl)
- elog(DEBUG, "bad key in cancel request for process %d",
- backendPID);
- }
+ elog(DEBUG1, "bad key in cancel request for process %d",
+ backendPID);
return;
}
}
/* No matching backend */
- if (DebugLvl)
- elog(DEBUG, "bad pid in cancel request for process %d", backendPID);
+ elog(DEBUG1, "bad pid in cancel request for process %d", backendPID);
}
/*
if (!(port = (Port *) calloc(1, sizeof(Port))))
{
- elog(DEBUG, "ConnCreate: malloc failed");
+ elog(LOG, "ConnCreate: malloc failed");
SignalChildren(SIGQUIT);
ExitPostmaster(1);
}
PG_SETMASK(&BlockSig);
- if (DebugLvl >= 1)
- elog(DEBUG, "pmdie %d", postgres_signal_arg);
+ elog(DEBUG1, "pmdie %d", postgres_signal_arg);
switch (postgres_signal_arg)
{
if (Shutdown >= SmartShutdown)
break;
Shutdown = SmartShutdown;
- elog(DEBUG, "smart shutdown request");
+ elog(LOG, "smart shutdown request");
if (DLGetHead(BackendList)) /* let reaper() handle this */
break;
break;
if (ShutdownPID > 0)
{
- elog(REALLYFATAL, "shutdown process %d already running",
+ elog(PANIC, "shutdown process %d already running",
(int) ShutdownPID);
abort();
}
*/
if (Shutdown >= FastShutdown)
break;
- elog(DEBUG, "fast shutdown request");
+ elog(LOG, "fast shutdown request");
if (DLGetHead(BackendList)) /* let reaper() handle this */
{
Shutdown = FastShutdown;
if (!FatalError)
{
- elog(DEBUG, "aborting any active transactions");
+ elog(LOG, "aborting any active transactions");
SignalChildren(SIGTERM);
}
break;
break;
if (ShutdownPID > 0)
{
- elog(REALLYFATAL, "shutdown process %d already running",
+ elog(PANIC, "shutdown process %d already running",
(int) ShutdownPID);
abort();
}
* abort all children with SIGQUIT and exit without attempt to
* properly shutdown data base system.
*/
- elog(DEBUG, "immediate shutdown request");
+ elog(LOG, "immediate shutdown request");
if (ShutdownPID > 0)
kill(ShutdownPID, SIGQUIT);
if (StartupPID > 0)
PG_SETMASK(&BlockSig);
- if (DebugLvl)
- elog(DEBUG, "reaping dead processes");
+ elog(DEBUG1, "reaping dead processes");
#ifdef HAVE_WAITPID
while ((pid = waitpid(-1, &status, WNOHANG)) > 0)
{
{
LogChildExit(gettext("startup process"),
pid, exitstatus);
- elog(DEBUG, "aborting startup due to startup process failure");
+ elog(LOG, "aborting startup due to startup process failure");
ExitPostmaster(1);
}
StartupPID = 0;
{
if (ShutdownPID > 0)
{
- elog(STOP, "startup process %d died while shutdown process %d already running",
+ elog(PANIC, "startup process %d died while shutdown process %d already running",
pid, (int) ShutdownPID);
abort();
}
*/
if (DLGetHead(BackendList) || StartupPID > 0 || ShutdownPID > 0)
goto reaper_done;
- elog(DEBUG, "all server processes terminated; reinitializing shared memory and semaphores");
+ elog(LOG, "all server processes terminated; reinitializing shared memory and semaphores");
shmem_exit(0);
reset_shared(PostPortNumber);
*next;
Backend *bp;
- if (DebugLvl)
- LogChildExit(gettext("child process"), pid, exitstatus);
+ LogChildExit(gettext("child process"), pid, exitstatus);
/*
* If a backend dies in an ugly way (i.e. exit status not 0) then we
if (!FatalError)
{
LogChildExit(gettext("server process"), pid, exitstatus);
- elog(DEBUG, "terminating any other active server processes");
+ elog(LOG, "terminating any other active server processes");
}
curr = DLGetHead(BackendList);
*/
if (!FatalError)
{
- if (DebugLvl)
- elog(DEBUG, "CleanupProc: sending %s to process %d",
- (SendStop ? "SIGSTOP" : "SIGQUIT"),
- (int) bp->pid);
+ elog(DEBUG1, "CleanupProc: sending %s to process %d",
+ (SendStop ? "SIGSTOP" : "SIGQUIT"), (int) bp->pid);
kill(bp->pid, (SendStop ? SIGSTOP : SIGQUIT));
}
}
* describing a child process, such as "server process"
*/
if (WIFEXITED(exitstatus))
- elog(DEBUG, "%s (pid %d) exited with exit code %d",
+ elog(LOG, "%s (pid %d) exited with exit code %d",
procname, pid, WEXITSTATUS(exitstatus));
else if (WIFSIGNALED(exitstatus))
- elog(DEBUG, "%s (pid %d) was terminated by signal %d",
+ elog(LOG, "%s (pid %d) was terminated by signal %d",
procname, pid, WTERMSIG(exitstatus));
else
- elog(DEBUG, "%s (pid %d) exited with unexpected status %d",
+ elog(LOG, "%s (pid %d) exited with unexpected status %d",
procname, pid, exitstatus);
}
if (bp->pid != MyProcPid)
{
- if (DebugLvl >= 1)
- elog(DEBUG, "SignalChildren: sending signal %d to process %d",
- signal, (int) bp->pid);
-
+ elog(DEBUG1, "SignalChildren: sending signal %d to process %d",
+ signal, (int) bp->pid);
kill(bp->pid, signal);
}
bn = (Backend *) malloc(sizeof(Backend));
if (!bn)
{
- elog(DEBUG, "out of memory; connection startup aborted");
+ elog(LOG, "out of memory; connection startup aborted");
return STATUS_ERROR;
}
status = DoBackend(port);
if (status != 0)
{
- elog(DEBUG, "connection startup failed");
+ elog(LOG, "connection startup failed");
proc_exit(status);
}
else
beos_backend_startup_failed();
#endif
free(bn);
- elog(DEBUG, "connection startup failed (fork failure): %s",
+ elog(LOG, "connection startup failed (fork failure): %s",
strerror(save_errno));
report_fork_failure_to_client(port, save_errno);
return STATUS_ERROR;
}
/* in parent, normal */
- if (DebugLvl >= 1)
- elog(DEBUG, "BackendStartup: forked pid=%d socket=%d",
- (int) pid, port->sock);
+ elog(DEBUG1, "BackendStartup: forked pid=%d socket=%d", (int) pid,
+ port->sock);
/*
* Everything's been successful, it's safe to add this backend to our
char *remote_host;
char *av[ARGV_SIZE * 2];
int ac = 0;
- char debugbuf[ARGV_SIZE];
char protobuf[ARGV_SIZE];
char dbbuf[ARGV_SIZE];
char optbuf[ARGV_SIZE];
PG_SETMASK(&BlockSig);
if (Log_connections)
- elog(DEBUG, "connection: host=%s user=%s database=%s",
+ elog(LOG, "connection: host=%s user=%s database=%s",
remote_host, port->user, port->database);
/*
av[ac++] = "postgres";
- /*
- * Pass the requested debugging level along to the backend. Level one
- * debugging in the postmaster traces postmaster connection activity,
- * and levels two and higher are passed along to the backend. This
- * allows us to watch only the postmaster or the postmaster and the
- * backend.
- */
- if (DebugLvl > 1)
- {
- sprintf(debugbuf, "-d%d", DebugLvl);
- av[ac++] = debugbuf;
- }
-
/*
* Pass any backend switches specified with -o in the postmaster's own
* command line. We assume these are secure. (It's OK to mangle
/*
* Debug: print arguments being passed to backend
*/
- if (DebugLvl > 1)
- {
- fprintf(stderr, "%s child[%d]: starting with (",
- progname, MyProcPid);
- for (i = 0; i < ac; ++i)
- fprintf(stderr, "%s ", av[i]);
- fprintf(stderr, ")\n");
- }
+ elog(DEBUG2, "%s child[%d]: starting with (", progname, MyProcPid);
+ for (i = 0; i < ac; ++i)
+ elog(DEBUG2, "%s ", av[i]);
+ elog(DEBUG2, ")\n");
return (PostgresMain(ac, av, port->user));
}
/* Set up command-line arguments for subprocess */
av[ac++] = "postgres";
- av[ac++] = "-d";
-
sprintf(nbbuf, "-B%d", NBuffers);
av[ac++] = nbbuf;
switch (xlop)
{
case BS_XLOG_STARTUP:
- elog(DEBUG, "could not launch startup process (fork failure): %s",
+ elog(LOG, "could not launch startup process (fork failure): %s",
strerror(errno));
break;
case BS_XLOG_CHECKPOINT:
- elog(DEBUG, "could not launch checkpoint process (fork failure): %s",
+ elog(LOG, "could not launch checkpoint process (fork failure): %s",
strerror(errno));
break;
case BS_XLOG_SHUTDOWN:
default:
- elog(DEBUG, "could not launch shutdown process (fork failure): %s",
+ elog(LOG, "could not launch shutdown process (fork failure): %s",
strerror(errno));
break;
}
{
if (!(bn = (Backend *) malloc(sizeof(Backend))))
{
- elog(DEBUG, "CheckPointDataBase: malloc failed");
+ elog(LOG, "CheckPointDataBase: malloc failed");
ExitPostmaster(1);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.120 2001/11/10 23:51:14 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.121 2002/03/02 21:39:28 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
if (buf->flags & BM_JUST_DIRTIED)
{
- elog(STOP, "BufferAlloc: content of block %u (%u/%u) changed while flushing",
+ elog(PANIC, "BufferAlloc: content of block %u (%u/%u) changed while flushing",
buf->tag.blockNum,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode);
}
}
if (status == SM_FAIL) /* disk failure ?! */
- elog(STOP, "BufferSync: cannot write %u for %u/%u",
+ elog(PANIC, "BufferSync: cannot write %u for %u/%u",
bufHdr->tag.blockNum,
bufHdr->tag.rnode.tblNode, bufHdr->tag.rnode.relNode);
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);
for (i = 0; i < NBuffers; ++i, ++buf)
{
- elog(DEBUG, "[%02d] (freeNext=%d, freePrev=%d, rel=%u/%u, \
+ elog(LOG, "[%02d] (freeNext=%d, freePrev=%d, rel=%u/%u, \
blockNum=%u, flags=0x%x, refcount=%d %ld)",
i, buf->freeNext, buf->freePrev,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
(char *) MAKE_PTR(bufHdr->data));
if (status == SM_FAIL) /* disk failure ?! */
- elog(STOP, "FlushRelationBuffers: cannot write %u for %u/%u",
+ elog(PANIC, "FlushRelationBuffers: cannot write %u for %u/%u",
bufHdr->tag.blockNum,
bufHdr->tag.rnode.tblNode,
bufHdr->tag.rnode.relNode);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.88 2002/02/10 22:56:31 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.89 2002/03/02 21:39:29 momjian Exp $
*
* NOTES:
*
{
int save_errno = errno;
- DO_DB(elog(DEBUG, "BasicOpenFile: not enough descs, retry, er= %d",
+ DO_DB(elog(LOG, "BasicOpenFile: not enough descs, retry, er= %d",
errno));
errno = 0;
if (ReleaseLruFile())
#else
no_files = (long) max_files_per_process;
#endif
- elog(DEBUG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
+ elog(LOG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
no_files);
}
#else /* !HAVE_SYSCONF */
sprintf(buf + strlen(buf), "%d ", mru);
}
sprintf(buf + strlen(buf), "LEAST");
- elog(DEBUG, buf);
+ elog(LOG, buf);
}
#endif /* FDDEBUG */
Assert(file != 0);
- DO_DB(elog(DEBUG, "Delete %d (%s)",
+ DO_DB(elog(LOG, "Delete %d (%s)",
file, VfdCache[file].fileName));
DO_DB(_dump_lru());
Assert(file != 0);
- DO_DB(elog(DEBUG, "LruDelete %d (%s)",
+ DO_DB(elog(LOG, "LruDelete %d (%s)",
file, VfdCache[file].fileName));
vfdP = &VfdCache[file];
if (vfdP->fdstate & FD_DIRTY)
{
if (pg_fsync(vfdP->fd))
- elog(DEBUG, "LruDelete: failed to fsync %s: %m",
+ elog(LOG, "LruDelete: failed to fsync %s: %m",
vfdP->fileName);
vfdP->fdstate &= ~FD_DIRTY;
}
/* close the file */
if (close(vfdP->fd))
- elog(DEBUG, "LruDelete: failed to close %s: %m",
+ elog(LOG, "LruDelete: failed to close %s: %m",
vfdP->fileName);
--nfile;
Assert(file != 0);
- DO_DB(elog(DEBUG, "Insert %d (%s)",
+ DO_DB(elog(LOG, "Insert %d (%s)",
file, VfdCache[file].fileName));
DO_DB(_dump_lru());
Assert(file != 0);
- DO_DB(elog(DEBUG, "LruInsert %d (%s)",
+ DO_DB(elog(LOG, "LruInsert %d (%s)",
file, VfdCache[file].fileName));
vfdP = &VfdCache[file];
vfdP->fileMode);
if (vfdP->fd < 0)
{
- DO_DB(elog(DEBUG, "RE_OPEN FAILED: %d", errno));
+ DO_DB(elog(LOG, "RE_OPEN FAILED: %d", errno));
return vfdP->fd;
}
else
{
- DO_DB(elog(DEBUG, "RE_OPEN SUCCESS"));
+ DO_DB(elog(LOG, "RE_OPEN SUCCESS"));
++nfile;
}
static bool
ReleaseLruFile(void)
{
- DO_DB(elog(DEBUG, "ReleaseLruFile. Opened %d", nfile));
+ DO_DB(elog(LOG, "ReleaseLruFile. Opened %d", nfile));
if (nfile > 0)
{
Index i;
File file;
- DO_DB(elog(DEBUG, "AllocateVfd. Size %d", SizeVfdCache));
+ DO_DB(elog(LOG, "AllocateVfd. Size %d", SizeVfdCache));
if (SizeVfdCache == 0)
{
{
Vfd *vfdP = &VfdCache[file];
- DO_DB(elog(DEBUG, "FreeVfd: %d (%s)",
+ DO_DB(elog(LOG, "FreeVfd: %d (%s)",
file, vfdP->fileName ? vfdP->fileName : ""));
if (vfdP->fileName != NULL)
{
int returnValue;
- DO_DB(elog(DEBUG, "FileAccess %d (%s)",
+ DO_DB(elog(LOG, "FileAccess %d (%s)",
file, VfdCache[file].fileName));
/*
if (fileName == NULL)
elog(ERROR, "fileNameOpenFile: NULL fname");
- DO_DB(elog(DEBUG, "fileNameOpenFile: %s %x %o",
+ DO_DB(elog(LOG, "fileNameOpenFile: %s %x %o",
fileName, fileFlags, fileMode));
file = AllocateVfd();
return -1;
}
++nfile;
- DO_DB(elog(DEBUG, "fileNameOpenFile: success %d",
+ DO_DB(elog(LOG, "fileNameOpenFile: success %d",
vfdP->fd));
Insert(file);
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileClose: %d (%s)",
+ DO_DB(elog(LOG, "FileClose: %d (%s)",
file, VfdCache[file].fileName));
vfdP = &VfdCache[file];
if (vfdP->fdstate & FD_DIRTY)
{
if (pg_fsync(vfdP->fd))
- elog(DEBUG, "FileClose: failed to fsync %s: %m",
+ elog(LOG, "FileClose: failed to fsync %s: %m",
vfdP->fileName);
vfdP->fdstate &= ~FD_DIRTY;
}
/* close the file */
if (close(vfdP->fd))
- elog(DEBUG, "FileClose: failed to close %s: %m",
+ elog(LOG, "FileClose: failed to close %s: %m",
vfdP->fileName);
--nfile;
/* reset flag so that die() interrupt won't cause problems */
vfdP->fdstate &= ~FD_TEMPORARY;
if (unlink(vfdP->fileName))
- elog(DEBUG, "FileClose: failed to unlink %s: %m",
+ elog(LOG, "FileClose: failed to unlink %s: %m",
vfdP->fileName);
}
{
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileUnlink: %d (%s)",
+ DO_DB(elog(LOG, "FileUnlink: %d (%s)",
file, VfdCache[file].fileName));
/* force FileClose to delete it */
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileRead: %d (%s) %ld %d %p",
+ DO_DB(elog(LOG, "FileRead: %d (%s) %ld %d %p",
file, VfdCache[file].fileName,
VfdCache[file].seekPos, amount, buffer));
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileWrite: %d (%s) %ld %d %p",
+ DO_DB(elog(LOG, "FileWrite: %d (%s) %ld %d %p",
file, VfdCache[file].fileName,
VfdCache[file].seekPos, amount, buffer));
{
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileSeek: %d (%s) %ld %ld %d",
+ DO_DB(elog(LOG, "FileSeek: %d (%s) %ld %ld %d",
file, VfdCache[file].fileName,
VfdCache[file].seekPos, offset, whence));
FileTell(File file)
{
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileTell %d (%s)",
+ DO_DB(elog(LOG, "FileTell %d (%s)",
file, VfdCache[file].fileName));
return VfdCache[file].seekPos;
}
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileTruncate %d (%s)",
+ DO_DB(elog(LOG, "FileTruncate %d (%s)",
file, VfdCache[file].fileName));
FileSync(file);
{
Assert(FileIsValid(file));
- DO_DB(elog(DEBUG, "FileMarkDirty: %d (%s)",
+ DO_DB(elog(LOG, "FileMarkDirty: %d (%s)",
file, VfdCache[file].fileName));
VfdCache[file].fdstate |= FD_DIRTY;
{
FILE *file;
- DO_DB(elog(DEBUG, "AllocateFile: Allocated %d", numAllocatedFiles));
+ DO_DB(elog(LOG, "AllocateFile: Allocated %d", numAllocatedFiles));
if (numAllocatedFiles >= MAX_ALLOCATED_FILES)
elog(ERROR, "AllocateFile: too many private FDs demanded");
{
int save_errno = errno;
- DO_DB(elog(DEBUG, "AllocateFile: not enough descs, retry, er= %d",
+ DO_DB(elog(LOG, "AllocateFile: not enough descs, retry, er= %d",
errno));
errno = 0;
if (ReleaseLruFile())
{
int i;
- DO_DB(elog(DEBUG, "FreeFile: Allocated %d", numAllocatedFiles));
+ DO_DB(elog(LOG, "FreeFile: Allocated %d", numAllocatedFiles));
/* Remove file from list of allocated files, if it's present */
for (i = numAllocatedFiles; --i >= 0;)
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.75 2001/11/05 17:46:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.76 2002/03/02 21:39:29 momjian Exp $
*
* NOTES
*
InterruptHoldoffCount = 1;
CritSectionCount = 0;
- if (DebugLvl > 1)
- elog(DEBUG, "proc_exit(%d)", code);
+ elog(DEBUG2, "proc_exit(%d)", code);
/* do our shared memory exits first */
shmem_exit(code);
(*on_proc_exit_list[on_proc_exit_index].function) (code,
on_proc_exit_list[on_proc_exit_index].arg);
- if (DebugLvl > 1)
- elog(DEBUG, "exit(%d)", code);
+ elog(DEBUG2, "exit(%d)", code);
exit(code);
}
void
shmem_exit(int code)
{
- if (DebugLvl > 1)
- elog(DEBUG, "shmem_exit(%d)", code);
+ elog(DEBUG2, "shmem_exit(%d)", code);
/*
* call all the registered callbacks.
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.45 2001/11/04 19:55:31 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.46 2002/03/02 21:39:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* might as well round it off to a multiple of a typical page size */
size += 8192 - (size % 8192);
- if (DebugLvl > 1)
- fprintf(stderr, "invoking IpcMemoryCreate(size=%d)\n", size);
+ elog(DEBUG2, "invoking IpcMemoryCreate(size=%d)", size);
/*
* Create the shmem segment
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.43 2001/10/25 05:49:42 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.44 2002/03/02 21:39:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
insertOK = SIInsertDataEntry(shmInvalBuffer, msg);
LWLockRelease(SInvalLock);
if (!insertOK)
- elog(DEBUG, "SendSharedInvalidMessage: SI buffer overflow");
+ elog(LOG, "SendSharedInvalidMessage: SI buffer overflow");
}
/*
if (getResult < 0)
{
/* got a reset message */
- elog(DEBUG, "ReceiveSharedInvalidMessages: cache state reset");
+ elog(LOG, "ReceiveSharedInvalidMessages: cache state reset");
resetFunction();
}
else
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.43 2001/11/05 17:46:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.44 2002/03/02 21:39:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
MyBackendId = (stateP - &segP->procState[0]) + 1;
#ifdef INVALIDDEBUG
- elog(DEBUG, "SIBackendInit: backend id %d", MyBackendId);
+ elog(LOG, "SIBackendInit: backend id %d", MyBackendId);
#endif /* INVALIDDEBUG */
/* mark myself active, with all extant messages already read */
if (numMsgs == (MAXNUMMESSAGES * 70 / 100) &&
IsUnderPostmaster)
{
- if (DebugLvl >= 1)
- elog(DEBUG, "SIInsertDataEntry: table is 70%% full, signaling postmaster");
+ elog(DEBUG1, "SIInsertDataEntry: table is 70%% full, signaling postmaster");
SendPostmasterSignal(PMSIGNAL_WAKEN_CHILDREN);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.104 2001/11/05 17:46:28 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.105 2002/03/02 21:39:29 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
LOCK_PRINT(const char *where, const LOCK *lock, LOCKMODE type)
{
if (LOCK_DEBUG_ENABLED(lock))
- elog(DEBUG,
+ elog(LOG,
"%s: lock(%lx) tbl(%d) rel(%u) db(%u) obj(%u) grantMask(%x) "
"req(%d,%d,%d,%d,%d,%d,%d)=%d "
"grant(%d,%d,%d,%d,%d,%d,%d)=%d wait(%d) type(%s)",
&& (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId >= (Oid) Trace_lock_oidmin))
|| (Trace_lock_table && (((LOCK *) MAKE_PTR(holderP->tag.lock))->tag.relId == Trace_lock_table))
)
- elog(DEBUG,
+ elog(LOG,
"%s: holder(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%d,%d,%d,%d,%d,%d,%d)=%d",
where, MAKE_OFFSET(holderP), holderP->tag.lock,
HOLDER_LOCKMETHOD(*(holderP)),
#ifdef LOCK_DEBUG
if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
- elog(DEBUG, "LockAcquire: user lock [%u] %s",
+ elog(LOG, "LockAcquire: user lock [%u] %s",
locktag->objId.blkno, lock_mode_names[lockmode]);
#endif
{
if (i >= (int) lockmode)
break; /* safe: we have a lock >= req level */
- elog(DEBUG, "Deadlock risk: raising lock level"
+ elog(LOG, "Deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
#ifdef LOCK_DEBUG
if (lockmethod == USER_LOCKMETHOD && Trace_userlocks)
- elog(DEBUG, "LockRelease: user lock tag [%u] %d", locktag->objId.blkno, lockmode);
+ elog(LOG, "LockRelease: user lock tag [%u] %d", locktag->objId.blkno, lockmode);
#endif
/* ???????? This must be changed when short term locks will be used */
#ifdef LOCK_DEBUG
if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
- elog(DEBUG, "LockReleaseAll: lockmethod=%d, pid=%d",
+ elog(LOG, "LockReleaseAll: lockmethod=%d, pid=%d",
lockmethod, proc->pid);
#endif
#ifdef LOCK_DEBUG
if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
- elog(DEBUG, "LockReleaseAll: done");
+ elog(LOG, "LockReleaseAll: done");
#endif
return TRUE;
LOCK_PRINT("DumpAllLocks", lock, 0);
}
else
- elog(DEBUG, "DumpAllLocks: holder->tag.lock = NULL");
+ elog(LOG, "DumpAllLocks: holder->tag.lock = NULL");
}
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.8 2002/01/07 16:33:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.9 2002/03/02 21:39:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
PRINT_LWDEBUG(const char *where, LWLockId lockid, const volatile LWLock *lock)
{
if (Trace_lwlocks)
- elog(DEBUG, "%s(%d): excl %d shared %d head %p rOK %d",
+ elog(LOG, "%s(%d): excl %d shared %d head %p rOK %d",
where, (int) lockid,
(int) lock->exclusive, lock->shared, lock->head,
(int) lock->releaseOK);
LOG_LWDEBUG(const char *where, LWLockId lockid, const char *msg)
{
if (Trace_lwlocks)
- elog(DEBUG, "%s(%d): %s",
- where, (int) lockid, msg);
+ elog(LOG, "%s(%d): %s", where, (int) lockid, msg);
}
#else /* not LOCK_DEBUG */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.117 2001/12/28 18:16:43 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.118 2002/03/02 21:39:29 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* we are a backend, we inherit this by fork() from the postmaster).
*/
if (procglobal == NULL)
- elog(STOP, "InitProcess: Proc Header uninitialized");
+ elog(PANIC, "InitProcess: Proc Header uninitialized");
if (MyProc != NULL)
elog(ERROR, "InitProcess: you already exist");
* inherit this by fork() from the postmaster).
*/
if (ProcGlobal == NULL || DummyProc == NULL)
- elog(STOP, "InitDummyProcess: Proc Header uninitialized");
+ elog(PANIC, "InitDummyProcess: Proc Header uninitialized");
if (MyProc != NULL)
elog(ERROR, "InitDummyProcess: you already exist");
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.89 2001/10/28 06:25:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.90 2002/03/02 21:39:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (lseek(fd, seekpos, SEEK_SET) != seekpos)
{
- elog(DEBUG, "mdblindwrt: lseek(%ld) failed: %m", seekpos);
+ elog(LOG, "mdblindwrt: lseek(%ld) failed: %m", seekpos);
close(fd);
return SM_FAIL;
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(DEBUG, "mdblindwrt: write() failed: %m");
+ elog(LOG, "mdblindwrt: write() failed: %m");
status = SM_FAIL;
}
if (close(fd) < 0)
{
- elog(DEBUG, "mdblindwrt: close() failed: %m");
+ elog(LOG, "mdblindwrt: close() failed: %m");
status = SM_FAIL;
}
/* call fd.c to allow other FDs to be closed if needed */
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0600);
if (fd < 0)
- elog(DEBUG, "_mdfd_blind_getseg: couldn't open %s: %m", path);
+ elog(LOG, "_mdfd_blind_getseg: couldn't open %s: %m", path);
pfree(path);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.54 2001/10/25 05:49:43 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.55 2002/03/02 21:39:30 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (smgrsw[i].smgr_sync)
{
if ((*(smgrsw[i].smgr_sync)) () == SM_FAIL)
- elog(STOP, "storage sync failed on %s: %m",
+ elog(PANIC, "storage sync failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.251 2002/03/01 22:45:13 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/tcop/postgres.c,v 1.252 2002/03/02 21:39:31 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
List *raw_parsetree_list;
if (Debug_print_query)
- elog(DEBUG, "query: %s", query_string);
+ elog(LOG, "query: %s", query_string);
if (Show_parser_stats)
ResetUsage();
{
if (Debug_pretty_print)
{
- elog(DEBUG, "parse tree:");
+ elog(LOG, "parse tree:");
nodeDisplay(querytree);
}
else
- elog(DEBUG, "parse tree: %s", nodeToString(querytree));
+ elog(LOG, "parse tree: %s", nodeToString(querytree));
}
if (querytree->commandType == CMD_UTILITY)
{
if (Debug_pretty_print)
{
- elog(DEBUG, "rewritten parse tree:");
+ elog(LOG, "rewritten parse tree:");
foreach(list_item, querytree_list)
{
querytree = (Query *) lfirst(list_item);
}
else
{
- elog(DEBUG, "rewritten parse tree:");
+ elog(LOG, "rewritten parse tree:");
foreach(list_item, querytree_list)
{
querytree = (Query *) lfirst(list_item);
- elog(DEBUG, "%s", nodeToString(querytree));
+ elog(LOG, "%s", nodeToString(querytree));
}
}
}
{
if (Debug_pretty_print)
{
- elog(DEBUG, "plan:");
+ elog(LOG, "plan:");
nodeDisplay(plan);
}
else
- elog(DEBUG, "plan: %s", nodeToString(plan));
+ elog(LOG, "plan: %s", nodeToString(plan));
}
return plan;
* process utility functions (create, destroy, etc..)
*/
if (Debug_print_query)
- elog(DEBUG, "ProcessUtility: %s", query_string);
- else if (DebugLvl > 1)
- elog(DEBUG, "ProcessUtility");
+ elog(LOG, "ProcessUtility: %s", query_string);
+ else elog(DEBUG2, "ProcessUtility");
if (querytree->originalQuery)
{
}
else
{
- if (DebugLvl > 1)
- elog(DEBUG, "ProcessQuery");
+ elog(DEBUG2, "ProcessQuery");
if (querytree->originalQuery)
{
static void
start_xact_command(void)
{
- if (DebugLvl >= 1)
- elog(DEBUG, "StartTransactionCommand");
+ elog(DEBUG1, "StartTransactionCommand");
StartTransactionCommand();
}
DeferredTriggerEndQuery();
/* Now commit the command */
- if (DebugLvl >= 1)
- elog(DEBUG, "CommitTransactionCommand");
+ elog(DEBUG1, "CommitTransactionCommand");
CommitTransactionCommand();
#endif
printf(" -B NBUFFERS number of shared buffers (default %d)\n", DEF_NBUFFERS);
printf(" -c NAME=VALUE set run-time parameter\n");
- printf(" -d 1-5 debugging level\n");
+ printf(" -d 1-5,0 debugging level (0 is off)\n");
printf(" -D DATADIR database directory\n");
printf(" -e use European date format\n");
printf(" -E echo query before execution\n");
break;
case 'd': /* debug level */
- SetConfigOption("debug_level", optarg, ctx, gucsource);
- if (DebugLvl >= 1)
- SetConfigOption("log_connections", "true", ctx, gucsource);
- if (DebugLvl >= 2)
- SetConfigOption("debug_print_query", "true", ctx, gucsource);
- if (DebugLvl >= 3)
- SetConfigOption("debug_print_parse", "true", ctx, gucsource);
- if (DebugLvl >= 4)
- SetConfigOption("debug_print_plan", "true", ctx, gucsource);
- if (DebugLvl >= 5)
- SetConfigOption("debug_print_rewritten", "true", ctx, gucsource);
+ {
+ /* Set server debugging level. */
+ if (atoi(optarg) != 0)
+ {
+ char *debugstr = palloc(strlen("debug") + strlen(optarg) + 1);
+
+ sprintf(debugstr, "debug%s", optarg);
+ SetConfigOption("server_min_messages", debugstr, ctx, gucsource);
+ pfree(debugstr);
+ /*
+ * -d is not the same as setting client_min_messages
+ * because it enables other output options.
+ */
+ if (atoi(optarg) >= 1)
+ SetConfigOption("log_connections", "true", ctx, gucsource);
+ if (atoi(optarg) >= 2)
+ SetConfigOption("debug_print_query", "true", ctx, gucsource);
+ if (atoi(optarg) >= 3)
+ SetConfigOption("debug_print_parse", "true", ctx, gucsource);
+ if (atoi(optarg) >= 4)
+ SetConfigOption("debug_print_plan", "true", ctx, gucsource);
+ if (atoi(optarg) >= 5)
+ SetConfigOption("debug_print_rewritten", "true", ctx, gucsource);
+ }
+ else
+ /*
+ * -d 0 allows user to prevent postmaster debug from
+ * propagating to backend.
+ */
+ SetConfigOption("server_min_messages", "notice", PGC_POSTMASTER, PGC_S_ARGV);
+ }
break;
case 'E':
* putting it inside InitPostgres() instead. In particular, anything
* that involves database access should be there, not here.
*/
- if (DebugLvl > 1)
- elog(DEBUG, "InitPostgres");
+ elog(DEBUG2, "InitPostgres");
InitPostgres(DBName, username);
SetProcessingMode(NormalProcessing);
if (!IsUnderPostmaster)
{
puts("\nPOSTGRES backend interactive interface ");
- puts("$Revision: 1.251 $ $Date: 2002/03/01 22:45:13 $\n");
+ puts("$Revision: 1.252 $ $Date: 2002/03/02 21:39:31 $\n");
}
/*
MemoryContextSwitchTo(ErrorContext);
/* Do the recovery */
- if (DebugLvl >= 1)
- elog(DEBUG, "AbortCurrentTransaction");
+ elog(DEBUG1, "AbortCurrentTransaction");
AbortCurrentTransaction();
/*
if (str.data[str.len-1] == '\n')
str.data[--str.len] = '\0';
- elog(DEBUG, "%s\n%s", title, str.data);
+ elog(LOG, "%s\n%s", title, str.data);
pfree(str.data);
}
if (assert_enabled)
{
/* val != 0 should be trapped by previous Assert */
- elog(NOTICE, "Assert test successfull (val = %d)", val);
+ elog(INFO, "Assert test successfull (val = %d)", val);
}
else
- elog(NOTICE, "Assert checking is disabled (val = %d)", val);
+ elog(INFO, "Assert checking is disabled (val = %d)", val);
return val;
}
break;
default:
- elog(DEBUG, "CreateCommandTag: unknown parse node type %d",
+ elog(LOG, "CreateCommandTag: unknown parse node type %d",
nodeTag(parsetree));
tag = "???";
break;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.67 2002/02/18 23:11:22 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/acl.c,v 1.68 2002/03/02 21:39:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(s && aip && modechg);
#ifdef ACLDEBUG
- elog(DEBUG, "aclparse: input = '%s'", s);
+ elog(LOG, "aclparse: input = '%s'", s);
#endif
aip->ai_idtype = ACL_IDTYPE_UID;
s = getid(s, name);
}
#ifdef ACLDEBUG
- elog(DEBUG, "aclparse: correctly read [%x %d %x], modechg=%x",
+ elog(LOG, "aclparse: correctly read [%x %d %x], modechg=%x",
aip->ai_idtype, aip->ai_id, aip->ai_mode, *modechg);
#endif
return s;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.23 2001/11/19 19:51:20 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/format_type.c,v 1.24 2002/03/02 21:39:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
fieldstr = "";
break;
default:
- elog(DEBUG, "Invalid INTERVAL typmod 0x%x", typemod);
+ elog(LOG, "Invalid INTERVAL typmod 0x%x", typemod);
fieldstr = "";
break;
}
* The PostgreSQL locale utils.
*
*
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.13 2001/11/05 17:46:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/pg_locale.c,v 1.14 2002/03/02 21:39:32 momjian Exp $
*
* Portions Copyright (c) 1999-2000, PostgreSQL Global Development Group
*
PGLC_debug_lc(PG_LocaleCategories *lc)
{
#ifdef LC_MESSAGES
- elog(DEBUG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\nLC_MESSAGES:\t%s\n",
+ elog(LOG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\nLC_MESSAGES:\t%s\n",
lc->lang,
lc->lc_ctype,
lc->lc_numeric,
lc->lc_monetary,
lc->lc_messages);
#else
- elog(DEBUG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\n",
+ elog(LOG, "CURRENT LOCALE ENVIRONMENT:\n\nLANG: \t%s\nLC_CTYPE:\t%s\nLC_NUMERIC:\t%s\nLC_TIME:\t%s\nLC_COLLATE:\t%s\nLC_MONETARY:\t%s\n",
lc->lang,
lc->lc_ctype,
lc->lc_numeric,
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.104 2002/03/01 04:09:25 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v 1.105 2002/03/02 21:39:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return (bool) result;
localeptr = setlocale(LC_COLLATE, NULL);
if (!localeptr)
- elog(STOP, "Invalid LC_COLLATE setting");
+ elog(PANIC, "Invalid LC_COLLATE setting");
/*
* Currently we accept only "C" and "POSIX" (do any systems still
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.88 2002/02/25 04:06:50 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/cache/catcache.c,v 1.89 2002/03/02 21:39:32 momjian Exp $
*
*-------------------------------------------------------------------------
*/
long cc_hits = 0;
long cc_newloads = 0;
- elog(DEBUG, "Catcache stats dump: %d/%d tuples in catcaches",
+ elog(LOG, "Catcache stats dump: %d/%d tuples in catcaches",
CacheHdr->ch_ntup, CacheHdr->ch_maxtup);
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
{
if (cache->cc_ntup == 0 && cache->cc_searches == 0)
continue; /* don't print unused caches */
- elog(DEBUG, "Catcache %s/%s: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
+ elog(LOG, "Catcache %s/%s: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
cache->cc_relname,
cache->cc_indname,
cache->cc_ntup,
cc_hits += cache->cc_hits;
cc_newloads += cache->cc_newloads;
}
- elog(DEBUG, "Catcache totals: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
+ elog(LOG, "Catcache totals: %d tup, %ld srch, %ld hits, %ld loads, %ld not found",
CacheHdr->ch_ntup,
cc_searches,
cc_hits,
*/
#ifdef CACHEDEBUG
#define CatalogCacheInitializeCache_DEBUG1 \
- elog(DEBUG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
+ elog(LOG, "CatalogCacheInitializeCache: cache @%p %s", cache, \
cache->cc_relname)
#define CatalogCacheInitializeCache_DEBUG2 \
do { \
if (cache->cc_key[i] > 0) { \
- elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
+ elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d, %u", \
i+1, cache->cc_nkeys, cache->cc_key[i], \
tupdesc->attrs[cache->cc_key[i] - 1]->atttypid); \
} else { \
- elog(DEBUG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
+ elog(LOG, "CatalogCacheInitializeCache: load %d/%d w/%d", \
i+1, cache->cc_nkeys, cache->cc_key[i]); \
} \
} while(0)
heap_close(relation, NoLock);
- CACHE3_elog(DEBUG, "CatalogCacheInitializeCache: %s, %d keys",
+ CACHE3_elog(LOG, "CatalogCacheInitializeCache: %s, %d keys",
cache->cc_relname, cache->cc_nkeys);
/*
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_key[i];
- CACHE4_elog(DEBUG, "CatalogCacheInit %s %d %p",
+ CACHE4_elog(LOG, "CatalogCacheInit %s %d %p",
cache->cc_relname,
i,
cache);
{
uint32 hashIndex = 0;
- CACHE4_elog(DEBUG, "CatalogCacheComputeHashIndex %s %d %p",
+ CACHE4_elog(LOG, "CatalogCacheComputeHashIndex %s %d %p",
cache->cc_relname,
cache->cc_nkeys,
cache);
* sanity checks
*/
Assert(ItemPointerIsValid(pointer));
- CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: called");
+ CACHE1_elog(LOG, "CatalogCacheIdInvalidate: called");
/*
* inspect caches to find the proper cache
ct->dead = true;
else
CatCacheRemoveCTup(ccp, ct);
- CACHE1_elog(DEBUG, "CatalogCacheIdInvalidate: invalidated");
+ CACHE1_elog(LOG, "CatalogCacheIdInvalidate: invalidated");
/* could be multiple matches, so keep looking! */
}
}
{
CatCache *cache;
- CACHE1_elog(DEBUG, "ResetCatalogCaches called");
+ CACHE1_elog(LOG, "ResetCatalogCaches called");
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
ResetCatalogCache(cache);
- CACHE1_elog(DEBUG, "end of ResetCatalogCaches call");
+ CACHE1_elog(LOG, "end of ResetCatalogCaches call");
}
/*
{
CatCache *cache;
- CACHE2_elog(DEBUG, "CatalogCacheFlushRelation called for %u", relId);
+ CACHE2_elog(LOG, "CatalogCacheFlushRelation called for %u", relId);
for (cache = CacheHdr->ch_caches; cache; cache = cache->cc_next)
{
}
}
- CACHE1_elog(DEBUG, "end of CatalogCacheFlushRelation call");
+ CACHE1_elog(LOG, "end of CatalogCacheFlushRelation call");
}
/*
#ifdef CACHEDEBUG
#define InitCatCache_DEBUG1 \
do { \
- elog(DEBUG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
+ elog(LOG, "InitCatCache: rel=%s id=%d nkeys=%d size=%d\n", \
cp->cc_relname, cp->id, cp->cc_nkeys, cp->cc_size); \
} while(0)
DLMoveToFront(&ct->cache_elem);
#ifdef CACHEDEBUG
- CACHE3_elog(DEBUG, "SearchCatCache(%s): found in bucket %d",
+ CACHE3_elog(LOG, "SearchCatCache(%s): found in bucket %d",
cache->cc_relname, hash);
#endif /* CACHEDEBUG */
Buffer buffer;
int i;
- CACHE2_elog(DEBUG, "SearchCatCache(%s): performing index scan",
+ CACHE2_elog(LOG, "SearchCatCache(%s): performing index scan",
cache->cc_relname);
/*
{
HeapScanDesc sd;
- CACHE2_elog(DEBUG, "SearchCatCache(%s): performing heap scan",
+ CACHE2_elog(LOG, "SearchCatCache(%s): performing heap scan",
cache->cc_relname);
sd = heap_beginscan(relation, 0, SnapshotNow,
* Finish initializing the CatCTup header, and add it to the linked
* lists.
*/
- CACHE1_elog(DEBUG, "SearchCatCache: found tuple");
+ CACHE1_elog(LOG, "SearchCatCache: found tuple");
ct->ct_magic = CT_MAGIC;
ct->my_cache = cache;
if (oldct->refcount == 0)
{
- CACHE2_elog(DEBUG, "SearchCatCache(%s): Overflow, LRU removal",
+ CACHE2_elog(LOG, "SearchCatCache(%s): Overflow, LRU removal",
cache->cc_relname);
CatCacheRemoveCTup(oldct->my_cache, oldct);
break;
}
}
- CACHE4_elog(DEBUG, "SearchCatCache(%s): Contains %d/%d tuples",
+ CACHE4_elog(LOG, "SearchCatCache(%s): Contains %d/%d tuples",
cache->cc_relname, cache->cc_ntup, CacheHdr->ch_ntup);
- CACHE3_elog(DEBUG, "SearchCatCache(%s): put in bucket %d",
+ CACHE3_elog(LOG, "SearchCatCache(%s): put in bucket %d",
cache->cc_relname, hash);
return &ct->tuple;
{
CatCache *ccp;
- CACHE1_elog(DEBUG, "PrepareToInvalidateCacheTuple: called");
+ CACHE1_elog(LOG, "PrepareToInvalidateCacheTuple: called");
/*
* sanity checks
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.91 2001/11/05 17:46:30 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/error/elog.c,v 1.92 2002/03/02 21:39:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "mb/pg_wchar.h"
#endif
+#define DEFAULT_SERVER_MIN_MESSAGES_STR "notice"
+int server_min_messages;
+char *server_min_messages_str = NULL;
+const char server_min_messages_str_default[] = DEFAULT_SERVER_MIN_MESSAGES_STR;
+
+#define DEFAULT_CLIENT_MIN_MESSAGES_STR "info"
+int client_min_messages;
+char *client_min_messages_str = NULL;
+const char client_min_messages_str_default[] = DEFAULT_CLIENT_MIN_MESSAGES_STR;
+
#ifdef ENABLE_SYSLOG
/*
* 0 = only stdout/stderr
* Note that we use malloc() not palloc() because we want to retain
* control if we run out of memory. palloc() would recursively call
* elog(ERROR), which would be all right except if we are working on a
- * FATAL or REALLYFATAL error. We'd lose track of the fatal condition
+ * FATAL or PANIC error. We'd lose track of the fatal condition
* and report a mere ERROR to outer loop, which would be a Bad Thing.
* So, we substitute an appropriate message in-place, without
* downgrading the level if it's above ERROR.
const char *cp;
char *bp;
size_t space_needed;
+ bool output_to_server = false;
+ bool output_to_client = false;
/* size of the prefix needed for timestamp and pid, if enabled */
size_t timestamp_size;
+ /* Check for old elog calls. Codes were renumbered in 7.3. 2002-02-24 */
+ if (lev < DEBUG5)
+ elog(FATAL, "Pre-7.3 object file made an elog() call. Recompile.");
+
+ if (Use_syslog <= 1 || whereToSendOutput == Debug)
+ {
+ if (lev == LOG)
+ {
+ if (server_min_messages == LOG)
+ output_to_server = true;
+ else if (server_min_messages < FATAL)
+ output_to_server = true;
+ }
+ /* lev != LOG */
+ else
+ {
+ if (server_min_messages == LOG)
+ {
+ if (lev >= FATAL)
+ output_to_server = true;
+ }
+ /* Neither is LOG */
+ else if (lev >= server_min_messages)
+ output_to_server = true;
+ }
+ }
+
+ if (lev >= client_min_messages && whereToSendOutput == Remote)
+ output_to_client = true;
+
+ /* optimization to prevent work for messages that would never be output */
+ if (lev < ERROR && Use_syslog < 1 &&
+ output_to_server == false && output_to_client == false)
+ return;
+
/* Save error str before calling any function that might change errno */
errorstr = useful_strerror(errno);
lev = FATAL;
/*
- * If we are inside a critical section, all errors become REALLYFATAL
+ * If we are inside a critical section, all errors become PANIC
* errors. See miscadmin.h.
*/
if (lev == ERROR || lev == FATAL)
{
if (CritSectionCount > 0)
- lev = REALLYFATAL;
+ lev = PANIC;
}
prefix = elog_message_prefix(lev);
* vsnprintf won't know what to do with %m). To keep space
* calculation simple, we only allow one %m.
*/
- space_needed = timestamp_size + strlen(prefix)
- + strlen(fmt) + strlen(errorstr) + 1;
+ space_needed = timestamp_size + strlen(prefix) +
+ strlen(fmt) + strlen(errorstr) + 1;
if (copy_lineno)
{
- /* translator: This string will be truncated at 31 characters. */
+ /*
+ * Prints the failure line of the COPY. Wow, what a hack! bjm
+ * Translators: Error message will be truncated at 31 characters.
+ */
snprintf(copylineno_buf, 32, gettext("copy: line %d, "), copy_lineno);
space_needed += strlen(copylineno_buf);
}
{
/* We're up against it, convert to out-of-memory error */
fmt_buf = fmt_fixedbuf;
- if (lev != FATAL && lev != REALLYFATAL)
+ if (lev != FATAL && lev != PANIC)
{
lev = ERROR;
prefix = elog_message_prefix(lev);
if (copy_lineno)
{
strcat(fmt_buf, copylineno_buf);
- if (lev == ERROR || lev == FATAL || lev == REALLYFATAL)
+ if (lev == ERROR || lev == FATAL || lev == PANIC)
copy_lineno = 0;
}
{
/* We're up against it, convert to out-of-memory error */
msg_buf = msg_fixedbuf;
- if (lev != FATAL && lev != REALLYFATAL)
+ if (lev != FATAL && lev != PANIC)
{
lev = ERROR;
prefix = elog_message_prefix(lev);
switch (lev)
{
- case DEBUG:
+ case DEBUG1:
+ case DEBUG2:
+ case DEBUG3:
+ case DEBUG4:
+ case DEBUG5:
syslog_level = LOG_DEBUG;
break;
+ case LOG:
+ case INFO:
+ syslog_level = LOG_INFO;
+ break;
case NOTICE:
syslog_level = LOG_NOTICE;
break;
case FATAL:
syslog_level = LOG_ERR;
break;
- case REALLYFATAL:
+ case PANIC:
default:
syslog_level = LOG_CRIT;
break;
/* syslog doesn't want a trailing newline, but other destinations do */
strcat(msg_buf, "\n");
- /* write to terminal */
- if (Use_syslog <= 1 || whereToSendOutput == Debug)
+ /* Write to server logs or server terminal */
+ if (output_to_server)
write(2, msg_buf, strlen(msg_buf));
- if (lev > DEBUG && whereToSendOutput == Remote)
+ /* Should we output to the client too? */
+ if (output_to_client)
{
/* Send IPC message to the front-end program */
MemoryContext oldcxt;
*/
oldcxt = MemoryContextSwitchTo(ErrorContext);
- if (lev == NOTICE)
+ if (lev <= NOTICE)
/* exclude the timestamp from msg sent to frontend */
send_notice_to_frontend(msg_buf + timestamp_size);
else
* Guard against infinite loop from elog() during error recovery.
*/
if (InError)
- elog(REALLYFATAL, "elog: error during error recovery, giving up!");
+ elog(PANIC, "elog: error during error recovery, giving up!");
InError = true;
/*
siglongjmp(Warn_restart, 1);
}
- if (lev == FATAL || lev == REALLYFATAL)
+ if (lev == FATAL || lev == PANIC)
{
/*
* Serious crash time. Postmaster will observe nonzero process
{
StringInfoData buf;
- AssertArg(type == NOTICE || type == ERROR);
+ AssertArg(type <= ERROR);
pq_beginmessage(&buf);
- pq_sendbyte(&buf, type == NOTICE ? 'N' : 'E');
+ pq_sendbyte(&buf, type != ERROR ? 'N' : 'E'); /* N is INFO or NOTICE */
pq_sendstring(&buf, msg);
pq_endmessage(&buf);
switch (lev)
{
- case DEBUG:
+ case DEBUG1:
+ case DEBUG2:
+ case DEBUG3:
+ case DEBUG4:
+ case DEBUG5:
prefix = gettext("DEBUG: ");
break;
+ case LOG:
+ prefix = gettext("LOG: ");
+ break;
+ case INFO:
+ prefix = gettext("INFO: ");
+ break;
case NOTICE:
prefix = gettext("NOTICE: ");
break;
prefix = gettext("ERROR: ");
break;
case FATAL:
- prefix = gettext("FATAL 1: ");
+ prefix = gettext("FATAL: ");
break;
- case REALLYFATAL:
- prefix = gettext("FATAL 2: ");
+ case PANIC:
+ prefix = gettext("PANIC: ");
break;
}
Assert(prefix != NULL);
return prefix;
}
+
+
+/*
+ * GUC support routines
+ */
+
+/*
+ * check_server_min_messages -- GUC validation hook for server_min_messages.
+ *
+ * Returns true iff "lev" (case-insensitively) names a recognized server
+ * message level.  Plain "debug" is accepted as a backward-compatible
+ * synonym for the most verbose level.
+ */
+bool
+check_server_min_messages(const char *lev)
+{
+	if (strcasecmp(lev, "debug") == 0 ||
+		strcasecmp(lev, "debug1") == 0 ||
+		strcasecmp(lev, "debug2") == 0 ||
+		strcasecmp(lev, "debug3") == 0 ||	/* was misspelled "debub3" */
+		strcasecmp(lev, "debug4") == 0 ||
+		strcasecmp(lev, "debug5") == 0 ||
+		strcasecmp(lev, "log") == 0 ||
+		strcasecmp(lev, "info") == 0 ||
+		strcasecmp(lev, "notice") == 0 ||
+		strcasecmp(lev, "error") == 0 ||
+		strcasecmp(lev, "fatal") == 0 ||
+		strcasecmp(lev, "panic") == 0)
+		return true;
+	return false;
+}
+
+/*
+ * assign_server_min_messages -- GUC assignment hook for server_min_messages.
+ *
+ * Translates the (already check-hook-validated) level name into its
+ * numeric elog code.  Must accept every spelling the check hook accepts.
+ */
+void
+assign_server_min_messages(const char *lev)
+{
+	/*
+	 * Plain "debug" passes check_server_min_messages, so it must be
+	 * handled here too; treat it as the most verbose debug level,
+	 * matching the pre-7.3 DEBUG == DEBUG5 compatibility mapping.
+	 */
+	if (strcasecmp(lev, "debug") == 0)
+		server_min_messages = DEBUG5;
+	else if (strcasecmp(lev, "debug1") == 0)
+		server_min_messages = DEBUG1;
+	else if (strcasecmp(lev, "debug2") == 0)
+		server_min_messages = DEBUG2;
+	else if (strcasecmp(lev, "debug3") == 0)
+		server_min_messages = DEBUG3;
+	else if (strcasecmp(lev, "debug4") == 0)
+		server_min_messages = DEBUG4;
+	else if (strcasecmp(lev, "debug5") == 0)
+		server_min_messages = DEBUG5;
+	else if (strcasecmp(lev, "log") == 0)
+		server_min_messages = LOG;
+	else if (strcasecmp(lev, "info") == 0)
+		server_min_messages = INFO;
+	else if (strcasecmp(lev, "notice") == 0)
+		server_min_messages = NOTICE;
+	else if (strcasecmp(lev, "error") == 0)
+		server_min_messages = ERROR;
+	else if (strcasecmp(lev, "fatal") == 0)
+		server_min_messages = FATAL;
+	else if (strcasecmp(lev, "panic") == 0)
+		server_min_messages = PANIC;
+	else
+		/* Can't get here unless guc.c screwed up */
+		elog(ERROR, "bogus server_min_messages %s", lev);
+}
+
+/*
+ * check_client_min_messages -- GUC validation hook for client_min_messages.
+ *
+ * Returns true iff "lev" (case-insensitively) names a level valid for
+ * client output.  Note the client list stops at "error": fatal/panic are
+ * server-only levels and are deliberately not accepted here.
+ */
+bool
+check_client_min_messages(const char *lev)
+{
+	if (strcasecmp(lev, "debug") == 0 ||
+		strcasecmp(lev, "debug1") == 0 ||
+		strcasecmp(lev, "debug2") == 0 ||
+		strcasecmp(lev, "debug3") == 0 ||
+		strcasecmp(lev, "debug4") == 0 ||
+		strcasecmp(lev, "debug5") == 0 ||
+		strcasecmp(lev, "log") == 0 ||
+		strcasecmp(lev, "info") == 0 ||
+		strcasecmp(lev, "notice") == 0 ||
+		strcasecmp(lev, "error") == 0)
+		return true;
+	return false;
+}
+
+/*
+ * assign_client_min_messages -- GUC assignment hook for client_min_messages.
+ *
+ * Translates the (already check-hook-validated) level name into its
+ * numeric elog code.  Must accept every spelling the check hook accepts.
+ */
+void
+assign_client_min_messages(const char *lev)
+{
+	/*
+	 * Plain "debug" passes check_client_min_messages, so it must be
+	 * handled here too; treat it as the most verbose debug level,
+	 * matching the pre-7.3 DEBUG == DEBUG5 compatibility mapping.
+	 */
+	if (strcasecmp(lev, "debug") == 0)
+		client_min_messages = DEBUG5;
+	else if (strcasecmp(lev, "debug1") == 0)
+		client_min_messages = DEBUG1;
+	else if (strcasecmp(lev, "debug2") == 0)
+		client_min_messages = DEBUG2;
+	else if (strcasecmp(lev, "debug3") == 0)
+		client_min_messages = DEBUG3;
+	else if (strcasecmp(lev, "debug4") == 0)
+		client_min_messages = DEBUG4;
+	else if (strcasecmp(lev, "debug5") == 0)
+		client_min_messages = DEBUG5;
+	else if (strcasecmp(lev, "log") == 0)
+		client_min_messages = LOG;
+	else if (strcasecmp(lev, "info") == 0)
+		client_min_messages = INFO;
+	else if (strcasecmp(lev, "notice") == 0)
+		client_min_messages = NOTICE;
+	else if (strcasecmp(lev, "error") == 0)
+		client_min_messages = ERROR;
+	else
+		/* Can't get here unless guc.c screwed up */
+		elog(ERROR, "bogus client_min_messages %s", lev);
+}
+
+
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.54 2001/10/25 05:49:48 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.55 2002/03/02 21:39:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
sprintf(full, "%s/%s", mangled, basename);
pfree(mangled);
- if (DebugLvl > 1)
- elog(DEBUG, "find_in_dynamic_libpath: trying %s", full);
+ elog(DEBUG2, "find_in_dynamic_libpath: trying %s", full);
if (file_exists(full))
return full;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.40 2001/10/28 06:25:54 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/hash/dynahash.c,v 1.41 2002/03/02 21:39:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* systemwide restart. Otherwise, just shut down this one backend.
*/
if (hashp->isshared)
- elog(STOP, "Hash table '%s' corrupted", hashp->tabname);
+ elog(PANIC, "Hash table '%s' corrupted", hashp->tabname);
else
elog(FATAL, "Hash table '%s' corrupted", hashp->tabname);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/Attic/findbe.c,v 1.26 2002/02/08 16:30:11 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/Attic/findbe.c,v 1.27 2002/03/02 21:39:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
if (stat(path, &buf) < 0)
{
- if (DebugLvl > 1)
- fprintf(stderr, "ValidateBinary: can't stat \"%s\"\n",
- path);
+ elog(DEBUG2, "ValidateBinary: can't stat \"%s\"", path);
return -1;
}
if ((buf.st_mode & S_IFMT) != S_IFREG)
{
- if (DebugLvl > 1)
- fprintf(stderr, "ValidateBinary: \"%s\" is not a regular file\n",
- path);
+ elog(DEBUG2, "ValidateBinary: \"%s\" is not a regular file", path);
return -1;
}
{
is_r = buf.st_mode & S_IRUSR;
is_x = buf.st_mode & S_IXUSR;
- if (DebugLvl > 1 && !(is_r && is_x))
- fprintf(stderr, "ValidateBinary: \"%s\" is not user read/execute\n",
- path);
+ if (!(is_r && is_x))
+ elog(DEBUG2, "ValidateBinary: \"%s\" is not user read/execute", path);
return is_x ? (is_r ? 0 : -2) : -1;
}
pwp = getpwuid(euid);
{
is_r = buf.st_mode & S_IRGRP;
is_x = buf.st_mode & S_IXGRP;
- if (DebugLvl > 1 && !(is_r && is_x))
- fprintf(stderr, "ValidateBinary: \"%s\" is not group read/execute\n",
- path);
+ if (!(is_r && is_x))
+ elog(DEBUG2, "ValidateBinary: \"%s\" is not group read/execute",
+ path);
return is_x ? (is_r ? 0 : -2) : -1;
}
}
is_r = buf.st_mode & S_IROTH;
is_x = buf.st_mode & S_IXOTH;
- if (DebugLvl > 1 && !(is_r && is_x))
- fprintf(stderr, "ValidateBinary: \"%s\" is not other read/execute\n",
- path);
+ if (!(is_r && is_x))
+ elog(DEBUG2, "ValidateBinary: \"%s\" is not other read/execute",
+ path);
return is_x ? (is_r ? 0 : -2) : -1;
}
if (ValidateBinary(buf) == 0)
{
strncpy(full_path, buf, MAXPGPATH);
- if (DebugLvl)
- fprintf(stderr, "FindExec: found \"%s\" using argv[0]\n",
- full_path);
+ elog(DEBUG1, "FindExec: found \"%s\" using argv[0]", full_path);
return 0;
}
fprintf(stderr, "FindExec: invalid binary \"%s\"\n",
*/
if ((p = getenv("PATH")) && *p)
{
- if (DebugLvl)
- fprintf(stderr, "FindExec: searching PATH ...\n");
+ elog(DEBUG1, "FindExec: searching PATH ...");
path = strdup(p); /* make a modifiable copy */
for (startp = path, endp = strchr(path, ':');
startp && *startp;
{
case 0: /* found ok */
strncpy(full_path, buf, MAXPGPATH);
- if (DebugLvl)
- fprintf(stderr, "FindExec: found \"%s\" using PATH\n",
- full_path);
+ elog(DEBUG1, "FindExec: found \"%s\" using PATH",
+ full_path);
free(path);
return 0;
case -1: /* wasn't even a candidate, keep looking */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.62 2001/10/25 05:49:51 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/globals.c,v 1.63 2002/03/02 21:39:33 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
bool IsUnderPostmaster = false;
-int DebugLvl = 0;
-
int DateStyle = USE_ISO_DATES;
bool EuroDates = false;
bool HasCTZSet = false;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.83 2002/03/01 22:45:15 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/init/miscinit.c,v 1.84 2002/03/02 21:39:33 momjian Exp $
*
*-------------------------------------------------------------------------
*/
fd = open(directoryLockFile, O_RDWR | PG_BINARY, 0);
if (fd < 0)
{
- elog(DEBUG, "Failed to rewrite %s: %m", directoryLockFile);
+ elog(LOG, "Failed to rewrite %s: %m", directoryLockFile);
return;
}
len = read(fd, buffer, sizeof(buffer) - 100);
if (len <= 0)
{
- elog(DEBUG, "Failed to read %s: %m", directoryLockFile);
+ elog(LOG, "Failed to read %s: %m", directoryLockFile);
close(fd);
return;
}
if (ptr == NULL ||
(ptr = strchr(ptr + 1, '\n')) == NULL)
{
- elog(DEBUG, "Bogus data in %s", directoryLockFile);
+ elog(LOG, "Bogus data in %s", directoryLockFile);
close(fd);
return;
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(DEBUG, "Failed to write %s: %m", directoryLockFile);
+ elog(LOG, "Failed to write %s: %m", directoryLockFile);
close(fd);
return;
}
*
* Copyright 2000 by PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc-file.l,v 1.10 2002/02/23 01:31:36 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc-file.l,v 1.11 2002/03/02 21:39:33 momjian Exp $
*/
%{
Assert(context == PGC_POSTMASTER || context == PGC_BACKEND
|| context == PGC_SIGHUP);
Assert(DataDir);
- elevel = (context == PGC_SIGHUP) ? DEBUG : ERROR;
+ elevel = (context == PGC_SIGHUP) ? DEBUG3 : ERROR;
/*
* Open file
* Support for grand unified configuration scheme, including SET
* command, configuration file, and command line options.
*
- * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.60 2002/03/01 22:45:16 petere Exp $
+ * $Header: /cvsroot/pgsql/src/backend/utils/misc/guc.c,v 1.61 2002/03/02 21:39:34 momjian Exp $
*
* Copyright 2000 by PostgreSQL Global Development Group
* Written by Peter Eisentraut <peter_e@gmx.net>.
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/datetime.h"
+#include "utils/elog.h"
#include "pgstat.h"
1000, 25, INT_MAX, NULL, NULL
},
- {
- "debug_level", PGC_USERSET, PGC_S_DEFAULT, &DebugLvl,
- 0, 0, 16, NULL, NULL
- },
-
#ifdef LOCK_DEBUG
{
"trace_lock_oidmin", PGC_SUSET, PGC_S_DEFAULT, &Trace_lock_oidmin,
static struct config_string
ConfigureNamesString[] =
{
+ {
+ "client_min_messages", PGC_USERSET, PGC_S_DEFAULT, &client_min_messages_str,
+ client_min_messages_str_default, check_client_min_messages,
+ assign_client_min_messages
+ },
+
{
"default_transaction_isolation", PGC_USERSET, PGC_S_DEFAULT, &default_iso_level_string,
"read committed", check_defaultxactisolevel, assign_defaultxactisolevel
PG_KRB_SRVTAB, NULL, NULL
},
+ {
+ "server_min_messages", PGC_USERSET, PGC_S_DEFAULT, &server_min_messages_str,
+ server_min_messages_str_default, check_server_min_messages,
+ assign_server_min_messages
+ },
+
#ifdef ENABLE_SYSLOG
{
"syslog_facility", PGC_POSTMASTER, PGC_S_DEFAULT, &Syslog_facility,
bool makeDefault;
if (context == PGC_SIGHUP)
- elevel = DEBUG;
+ elevel = DEBUG1;
else if (guc_session_init)
elevel = NOTICE;
else
if (record->source > source)
{
- if (DebugLvl > 1)
- elog(DEBUG, "setting %s refused because previous source is higher",
- name);
+ elog(DEBUG2, "setting %s refused because previous source is higher",
+ name);
return false;
}
makeDefault = source < PGC_S_SESSION;
#
-# Debug display
+# Message display
#
+
+#server_min_messages = notice # Values, in order of decreasing detail:
+ # debug5, debug4, debug3, debug2, debug1,
+ # info, notice, error, log, fatal, panic
+#client_min_messages = info # Values, in order of decreasing detail:
+ # debug5, debug4, debug3, debug2, debug1,
+ # log, info, notice, error
#silent_mode = false
#log_connections = false
# Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
# Portions Copyright (c) 1994, Regents of the University of California
#
-# $Header: /cvsroot/pgsql/src/bin/initdb/Attic/initdb.sh,v 1.144 2002/03/01 22:45:16 petere Exp $
+# $Header: /cvsroot/pgsql/src/bin/initdb/Attic/initdb.sh,v 1.145 2002/03/02 21:39:34 momjian Exp $
#
#-------------------------------------------------------------------------
if [ "$debug" = yes ]
then
- BACKEND_TALK_ARG="-d"
+ BACKEND_TALK_ARG="-d 5"
else
PGSQL_OPT="$PGSQL_OPT -o /dev/null"
fi
*
* Copyright 2000 by PostgreSQL Global Development Group
*
- * $Header: /cvsroot/pgsql/src/bin/psql/tab-complete.c,v 1.41 2001/11/05 17:46:31 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/bin/psql/tab-complete.c,v 1.42 2002/03/02 21:39:34 momjian Exp $
*/
/*----------------------------------------------------------------------
"ksqo",
"geqo",
"fsync",
+ "server_min_messages",
+ "client_min_messages",
"debug_assertions",
"debug_print_query",
"debug_print_parse",
"geqo_random_seed",
"sort_mem",
"vacuum_mem",
- "debug_level",
"max_expr_depth",
"commit_delay",
"commit_siblings",
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: bootstrap.h,v 1.26 2001/11/05 17:46:31 momjian Exp $
+ * $Id: bootstrap.h,v 1.27 2002/03/02 21:39:34 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Relation reldesc;
extern Form_pg_attribute attrtypes[MAXATTR];
extern int numattr;
-extern int DebugMode;
-
extern int BootstrapMain(int ac, char *av[]);
extern void index_register(char *heap, char *ind, IndexInfo *indexInfo);
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: miscadmin.h,v 1.99 2002/02/18 23:11:31 petere Exp $
+ * $Id: miscadmin.h,v 1.100 2002/03/02 21:39:34 momjian Exp $
*
* NOTES
* some of the information in this file should be moved to
extern bool IsUnderPostmaster;
-extern int DebugLvl;
-
/* Date/Time Configuration
*
* Constants to pass info from runtime environment:
* Portions Copyright (c) 1996-2001, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: elog.h,v 1.30 2001/11/05 17:46:36 momjian Exp $
+ * $Id: elog.h,v 1.31 2002/03/02 21:39:35 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define ELOG_H
/* Error level codes */
-#define NOTICE 0 /* random info, sent to frontend */
-#define ERROR (-1) /* user error - return to known state */
-#define FATAL 1 /* fatal error - abort process */
-#define REALLYFATAL 2 /* take down the other backends with me */
-#define DEBUG (-2) /* debug message */
+#define DEBUG5 10 /* sent only to server logs, label DEBUG */
+#define DEBUG4 11 /* logs in decreasing detail */
+#define DEBUG3 12
+#define DEBUG2 13
+#define DEBUG1 14
+#define LOG 15 /* sent only to server logs by default,
+ * label LOG. */
+#define INFO 16 /* sent only to client by default, for
+ * informative messages that are part of
+ * normal query operation. */
+#define NOTICE 17 /* sent to client and server by default,
+ * important messages, for unusual cases that
+ * should be reported but are not serious
+ * enough to abort the query. */
+#define ERROR 18 /* user error - return to known state */
+#define FATAL 19 /* fatal error - abort process */
+#define PANIC 20 /* take down the other backends with me */
-/* temporary nonsense... */
-#define STOP REALLYFATAL
-#define LOG DEBUG
+/*#define DEBUG DEBUG5*/ /* Backward compatibility with pre-7.3 */
/* Configurable parameters */
#ifdef ENABLE_SYSLOG
extern bool Log_timestamp;
extern bool Log_pid;
+extern char *server_min_messages_str;
+extern char *client_min_messages_str;
+extern const char server_min_messages_str_default[];
+extern const char client_min_messages_str_default[];
extern void
elog(int lev, const char *fmt,...)
extern int DebugFileOpen(void);
+extern bool check_server_min_messages(const char *lev);
+extern void assign_server_min_messages(const char *lev);
+extern bool check_client_min_messages(const char *lev);
+extern void assign_client_min_messages(const char *lev);
+
#endif /* ELOG_H */
int
elog_DEBUG(void)
{
- return DEBUG;
+ return LOG;
}
int
* procedural language
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/pl/plpgsql/src/gram.y,v 1.29 2001/11/29 22:57:37 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/pl/plpgsql/src/gram.y,v 1.30 2002/03/02 21:39:35 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
}
| K_DEBUG
{
- $$ = DEBUG;
+ $$ = DEBUG5;
}
;
* MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/pl/plpython/plpython.c,v 1.13 2001/11/16 18:04:31 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/pl/plpython/plpython.c,v 1.14 2002/03/02 21:39:35 momjian Exp $
*
*********************************************************************
*/
}
else
PLy_restart_in_progress += 1;
- if (proc)
+ if (proc)
{
Py_DECREF(proc->me);
}
* New RExec methods
*/
-PyObject*
+PyObject*
PLy_r_open(PyObject *self, PyObject* args)
{
PyErr_SetString(PyExc_IOError, "can't open files in restricted mode");
rexec_dict = ((PyClassObject*)rexec)->cl_dict;
/*
- * tweak the list of permitted modules, posix and sys functions
+ * tweak the list of permitted modules, posix and sys functions
*/
PyDict_SetItemString(rexec_dict, "ok_builtin_modules", PLy_importable_modules);
PyDict_SetItemString(rexec_dict, "ok_posix_names", PLy_ok_posix_names);
for ( ; methods->ml_name; ++methods) {
- /* get a wrapper for the built-in function */
+ /* get a wrapper for the built-in function */
PyObject *func = PyCFunction_New(methods, NULL);
PyObject *meth;
int status;
if (!func)
return -1;
- /* turn the function into an unbound method */
+ /* turn the function into an unbound method */
if (!(meth = PyMethod_New(func, NULL, klass))) {
Py_DECREF(func);
return -1;
}
/* add method to dictionary */
- status = PyDict_SetItemString( ((PyClassObject*)klass)->cl_dict,
+ status = PyDict_SetItemString( ((PyClassObject*)klass)->cl_dict,
methods->ml_name, meth);
Py_DECREF(meth);
Py_DECREF(func);
PyObject *
PLy_debug(PyObject * self, PyObject * args)
{
- return PLy_log(DEBUG, self, args);
+ return PLy_log(LOG, self, args);
}
PyObject *
}
/*
- * ok, this is a NOTICE, or DEBUG message
+ * ok, this is a NOTICE, or LOG message
*
* but just in case DON'T long jump out of the interpreter!
*/
char *PLy_procedure_name(PLyProcedure *proc)
{
- if ( proc == NULL )
- return "<unknown procedure>";
- return proc->proname;
+ if ( proc == NULL )
+ return "<unknown procedure>";
+ return proc->proname;
}
/* output a python traceback/exception via the postgresql elog
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/pl/tcl/pltcl.c,v 1.49 2002/01/24 19:31:36 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/pl/tcl/pltcl.c,v 1.50 2002/03/02 21:39:35 momjian Exp $
*
**********************************************************************/
else if (strcmp(argv[1], "FATAL") == 0)
level = FATAL;
else if (strcmp(argv[1], "DEBUG") == 0)
- level = DEBUG;
+ level = DEBUG1;
else
{
Tcl_AppendResult(interp, "Unknown elog level '", argv[1],
-- FOREIGN KEY CONSTRAINT adding TEST
CREATE TABLE tmp2 (a int primary key);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'tmp2_pkey' for table 'tmp2'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'tmp2_pkey' for table 'tmp2'
CREATE TABLE tmp3 (a int, b int);
CREATE TABLE tmp4 (a int, b int, unique(a,b));
-NOTICE: CREATE TABLE / UNIQUE will create implicit index 'tmp4_a_key' for table 'tmp4'
+INFO: CREATE TABLE / UNIQUE will create implicit index 'tmp4_a_key' for table 'tmp4'
CREATE TABLE tmp5 (a int, b int);
-- Insert rows into tmp2 (pktable)
INSERT INTO tmp2 values (1);
INSERT INTO tmp3 values (5,50);
-- Try (and fail) to add constraint due to invalid source columns
ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: ALTER TABLE: column "c" referenced in foreign key constraint does not exist
-- Try (and fail) to add constraint due to invalide destination columns explicitly given
ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "tmp2" not found
-- Try (and fail) to add constraint due to invalid data
ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: tmpconstr referential integrity violation - key referenced from tmp3 not found in tmp2
-- Delete failing row
DELETE FROM tmp3 where a=5;
-- Try (and succeed)
ALTER TABLE tmp3 add constraint tmpconstr foreign key (a) references tmp2 match full;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Try (and fail) to create constraint from tmp5(a) to tmp4(a) - unique constraint on
-- tmp4 is a,b
ALTER TABLE tmp5 add constraint tmpconstr foreign key(a) references tmp4(a) match full;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "tmp4" not found
DROP TABLE tmp5;
DROP TABLE tmp4;
DROP TABLE tmp3;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "tmp2"
DROP TABLE tmp2;
-- Foreign key adding test with mixed types
-- Note: these tables are TEMP to avoid name conflicts when this test
-- is run in parallel with foreign_key.sql.
CREATE TEMP TABLE PKTABLE (ptest1 int PRIMARY KEY);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TEMP TABLE FKTABLE (ftest1 text);
-- This next should fail, because text=int does not exist
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should also fail for the same reason, but here we
-- give the column name
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should succeed, even though they are different types
DROP TABLE FKTABLE;
CREATE TEMP TABLE FKTABLE (ftest1 varchar);
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- As should this
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1) references pktable(ptest1);
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE pktable;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE fktable;
CREATE TEMP TABLE PKTABLE (ptest1 int, ptest2 text,
PRIMARY KEY(ptest1, ptest2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- This should fail, because we just chose really odd types
CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 datetime);
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2) references pktable;
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- Again, so should this...
CREATE TEMP TABLE FKTABLE (ftest1 cidr, ftest2 datetime);
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
references pktable(ptest1, ptest2);
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- This fails because we mixed up the column ordering
CREATE TEMP TABLE FKTABLE (ftest1 int, ftest2 text);
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest1, ftest2)
references pktable(ptest2, ptest1);
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
-- As does this...
ALTER TABLE FKTABLE ADD FOREIGN KEY(ftest2, ftest1)
references pktable(ptest1, ptest2);
-NOTICE: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: ALTER TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- temp tables should go away by themselves, need not drop them.
create table atacc1 ( test int );
-- add a unique constraint
alter table atacc1 add constraint atacc_test1 unique (test);
-NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
+INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
-- insert first value
insert into atacc1 (test) values (2);
-- should fail
insert into atacc1 (test) values (4);
-- try adding a unique oid constraint
alter table atacc1 add constraint atacc_oid1 unique(oid);
-NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_oid1' for table 'atacc1'
+INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_oid1' for table 'atacc1'
drop table atacc1;
-- let's do one where the unique constraint fails when added
create table atacc1 ( test int );
insert into atacc1 (test) values (2);
-- add a unique constraint (fails)
alter table atacc1 add constraint atacc_test1 unique (test);
-NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
+INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
ERROR: Cannot create unique index. Table contains non-unique values
insert into atacc1 (test) values (3);
drop table atacc1;
create table atacc1 ( test int, test2 int);
-- add a unique constraint
alter table atacc1 add constraint atacc_test1 unique (test, test2);
-NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
+INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc_test1' for table 'atacc1'
-- insert initial value
insert into atacc1 (test,test2) values (4,4);
-- should fail
drop table atacc1;
-- lets do some naming tests
create table atacc1 (test int, test2 int, unique(test));
-NOTICE: CREATE TABLE / UNIQUE will create implicit index 'atacc1_test_key' for table 'atacc1'
+INFO: CREATE TABLE / UNIQUE will create implicit index 'atacc1_test_key' for table 'atacc1'
alter table atacc1 add unique (test2);
-NOTICE: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc1_test2_key' for table 'atacc1'
+INFO: ALTER TABLE / ADD UNIQUE will create implicit index 'atacc1_test2_key' for table 'atacc1'
-- should fail for @@ second one @@
insert into atacc1 (test2, test) values (3, 3);
insert into atacc1 (test2, test) values (2, 3);
--- test creation of SERIAL column
---
CREATE TABLE serialTest (f1 text, f2 serial);
-NOTICE: CREATE TABLE will create implicit sequence 'serialtest_f2_seq' for SERIAL column 'serialtest.f2'
-NOTICE: CREATE TABLE / UNIQUE will create implicit index 'serialtest_f2_key' for table 'serialtest'
+INFO: CREATE TABLE will create implicit sequence 'serialtest_f2_seq' for SERIAL column 'serialtest.f2'
+INFO: CREATE TABLE / UNIQUE will create implicit index 'serialtest_f2_key' for table 'serialtest'
INSERT INTO serialTest VALUES ('foo');
INSERT INTO serialTest VALUES ('bar');
INSERT INTO serialTest VALUES ('force', 100);
CREATE TABLE stud_emp (
percent int4
) INHERITS (emp, student);
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "name"
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "age"
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "location"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "name"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "age"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "location"
CREATE TABLE city (
name name,
location box,
CREATE TABLE d_star (
d float8
) INHERITS (b_star, c_star);
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "class"
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "a"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "class"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "a"
CREATE TABLE e_star (
e int2
) INHERITS (c_star);
-- First test, check and cascade
--
CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL ON DELETE CASCADE ON UPDATE CASCADE, ftest2 int );
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 'Test1');
INSERT INTO PKTABLE VALUES (2, 'Test2');
(3 rows)
DROP TABLE PKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- check set NULL and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, CONSTRAINT constrname FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET NULL ON UPDATE SET NULL);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 2, 'Test1');
INSERT INTO PKTABLE VALUES (1, 3, 'Test1-2');
(5 rows)
DROP TABLE PKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- check set default and table constraint on multiple columns
--
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 text, PRIMARY KEY(ptest1, ptest2) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT -1, ftest2 int DEFAULT -2, ftest3 int, CONSTRAINT constrname2 FOREIGN KEY(ftest1, ftest2)
REFERENCES PKTABLE MATCH FULL ON DELETE SET DEFAULT ON UPDATE SET DEFAULT);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert a value in PKTABLE for default
INSERT INTO PKTABLE VALUES (-1, -2, 'The Default!');
-- Insert test data into PKTABLE
(5 rows)
DROP TABLE PKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
--
-- First test, check with no on delete or on update
--
CREATE TABLE PKTABLE ( ptest1 int PRIMARY KEY, ptest2 text );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int REFERENCES PKTABLE MATCH FULL, ftest2 int );
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert test data into PKTABLE
INSERT INTO PKTABLE VALUES (1, 'Test1');
INSERT INTO PKTABLE VALUES (2, 'Test2');
(4 rows)
DROP TABLE PKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "fktable"
DROP TABLE FKTABLE;
-- MATCH unspecified
-- Base test restricting update/delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
(5 rows)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- cascade update/delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE CASCADE ON UPDATE CASCADE);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
(4 rows)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- set null update / set default delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE SET DEFAULT ON UPDATE SET NULL);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
(6 rows)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- set default update / set null delete
CREATE TABLE PKTABLE ( ptest1 int, ptest2 int, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2, ptest3) );
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE ( ftest1 int DEFAULT 0, ftest2 int DEFAULT -1, ftest3 int, ftest4 int, CONSTRAINT constrname3
FOREIGN KEY(ftest1, ftest2, ftest3) REFERENCES PKTABLE
ON DELETE SET NULL ON UPDATE SET DEFAULT);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- Insert Primary Key values
INSERT INTO PKTABLE VALUES (1, 2, 3, 'test1');
INSERT INTO PKTABLE VALUES (1, 3, 3, 'test2');
(7 rows)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
CREATE TABLE FKTABLE_FAIL1 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest2) REFERENCES PKTABLE);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: CREATE TABLE: column "ftest2" referenced in foreign key constraint does not exist
CREATE TABLE FKTABLE_FAIL2 ( ftest1 int, CONSTRAINT fkfail1 FOREIGN KEY (ftest1) REFERENCES PKTABLE(ptest2));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "pktable" not found
DROP TABLE FKTABLE_FAIL1;
ERROR: table "fktable_fail1" does not exist
DROP TABLE PKTABLE;
-- Test for referencing column number smaller than referenced constraint
CREATE TABLE PKTABLE (ptest1 int, ptest2 int, UNIQUE(ptest1, ptest2));
-NOTICE: CREATE TABLE / UNIQUE will create implicit index 'pktable_ptest1_key' for table 'pktable'
+INFO: CREATE TABLE / UNIQUE will create implicit index 'pktable_ptest1_key' for table 'pktable'
CREATE TABLE FKTABLE_FAIL1 (ftest1 int REFERENCES pktable(ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: UNIQUE constraint matching given keys for referenced table "pktable" not found
DROP TABLE FKTABLE_FAIL1;
ERROR: table "fktable_fail1" does not exist
--
-- Basic one column, two table setup
CREATE TABLE PKTABLE (ptest1 int PRIMARY KEY);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- This next should fail, because text=int does not exist
CREATE TABLE FKTABLE (ftest1 text REFERENCES pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should also fail for the same reason, but here we
-- give the column name
CREATE TABLE FKTABLE (ftest1 text REFERENCES pktable(ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- This should succeed, even though they are different types
-- because varchar=int does exist
CREATE TABLE FKTABLE (ftest1 varchar REFERENCES pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-- As should this
CREATE TABLE FKTABLE (ftest1 varchar REFERENCES pktable(ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- Two columns, two tables
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, PRIMARY KEY(ptest1, ptest2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- This should fail, because we just chose really odd types
CREATE TABLE FKTABLE (ftest1 cidr, ftest2 datetime, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- Again, so should this...
CREATE TABLE FKTABLE (ftest1 cidr, ftest2 datetime, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- This fails because we mixed up the column ordering
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- As does this...
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest1, ptest2));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- And again..
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest2, ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
-- This works...
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest2, ftest1) REFERENCES pktable(ptest2, ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-- As does this
CREATE TABLE FKTABLE (ftest1 int, ftest2 text, FOREIGN KEY(ftest1, ftest2) REFERENCES pktable(ptest1, ptest2));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE FKTABLE;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
DROP TABLE PKTABLE;
-- Two columns, same table
-- Make sure this still works...
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable(ptest1, ptest2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE PKTABLE;
-- And this,
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
DROP TABLE PKTABLE;
-- This shouldn't (mixed up columns)
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest3,
ptest4) REFERENCES pktable(ptest2, ptest1));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
-- Nor should this... (same reason, we have 4,3 referencing 1,2 which mismatches types
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
ptest3) REFERENCES pktable(ptest1, ptest2));
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
-- Not this one either... Same as the last one except we didn't defined the columns being referenced.
CREATE TABLE PKTABLE (ptest1 int, ptest2 text, ptest3 int, ptest4 text, PRIMARY KEY(ptest1, ptest2), FOREIGN KEY(ptest4,
ptest3) REFERENCES pktable);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
--
-- Basic 2 table case: 1 column of matching types.
create table pktable_base (base1 int not null);
create table pktable (ptest1 int, primary key(base1), unique(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE / UNIQUE will create implicit index 'pktable_base1_key' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / UNIQUE will create implicit index 'pktable_base1_key' for table 'pktable'
create table fktable (ftest1 int references pktable(base1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- now some ins, upd, del
insert into pktable(base1) values (1);
insert into pktable(base1) values (2);
delete from pktable where base1>3;
-- cleanup
drop table fktable;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
delete from pktable;
-- Now 2 columns 2 tables, matching types
create table fktable (ftest1 int, ftest2 int, foreign key(ftest1, ftest2) references pktable(base1, ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
-- now some ins, upd, del
insert into pktable(base1, ptest1) values (1, 1);
insert into pktable(base1, ptest1) values (2, 2);
delete from pktable where base1>3;
-- cleanup
drop table fktable;
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
-NOTICE: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
+INFO: DROP TABLE implicitly drops referential integrity trigger from table "pktable"
drop table pktable;
drop table pktable_base;
-- Now we'll do one all in 1 table with 2 columns of matching types
create table pktable_base(base1 int not null, base2 int);
create table pktable(ptest1 int, ptest2 int, primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
insert into pktable (base1, ptest1, base2, ptest2) values (1, 1, 1, 1);
insert into pktable (base1, ptest1, base2, ptest2) values (2, 1, 1, 1);
insert into pktable (base1, ptest1, base2, ptest2) values (2, 2, 2, 1);
-- 2 columns (2 tables), mismatched types
create table pktable_base(base1 int not null);
create table pktable(ptest1 text, primary key(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-- just generally bad types (with and without column references on the referenced table)
create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 cidr, ftest2 int[], foreign key (ftest1, ftest2) references pktable(base1, ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'cidr' and 'integer'
You will have to retype this query using an explicit cast
-- let's mix up which columns reference which
create table fktable(ftest1 int, ftest2 text, foreign key(ftest2, ftest1) references pktable);
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 int, ftest2 text, foreign key(ftest2, ftest1) references pktable(base1, ptest1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table fktable(ftest1 int, ftest2 text, foreign key(ftest1, ftest2) references pktable(ptest1, base1));
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
drop table pktable;
create table pktable_base(base1 int not null, base2 int);
create table pktable(ptest1 text, ptest2 text[], primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text[]' and 'text'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(base2, ptest2) references
pktable(ptest1, base1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'integer' and 'text'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(ptest2, base2) references
pktable(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
create table pktable(ptest1 text, ptest2 text, primary key(base1, ptest1), foreign key(ptest2, base2) references
pktable(base1, ptest1)) inherits (pktable_base);
-NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
-NOTICE: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
+INFO: CREATE TABLE / PRIMARY KEY will create implicit index 'pktable_pkey' for table 'pktable'
+INFO: CREATE TABLE will create implicit trigger(s) for FOREIGN KEY check(s)
ERROR: Unable to identify an operator '=' for types 'text' and 'integer'
You will have to retype this query using an explicit cast
drop table pktable;
--
SET DateStyle TO 'US,Postgres';
SHOW DateStyle;
-NOTICE: DateStyle is Postgres with US (NonEuropean) conventions
+INFO: DateStyle is Postgres with US (NonEuropean) conventions
SELECT '' AS "64", d1 AS us_postgres FROM TIMESTAMP_TBL;
64 | us_postgres
----+-----------------------------
SET DateStyle TO 'US,SQL';
SHOW DateStyle;
-NOTICE: DateStyle is SQL with US (NonEuropean) conventions
+INFO: DateStyle is SQL with US (NonEuropean) conventions
SELECT '' AS "64", d1 AS us_sql FROM TIMESTAMP_TBL;
64 | us_sql
----+------------------------
SET DateStyle TO 'European,Postgres';
SHOW DateStyle;
-NOTICE: DateStyle is Postgres with European conventions
+INFO: DateStyle is Postgres with European conventions
INSERT INTO TIMESTAMP_TBL VALUES('13/06/1957');
SELECT count(*) as one FROM TIMESTAMP_TBL WHERE d1 = 'Jun 13 1957';
one
SET DateStyle TO 'European,ISO';
SHOW DateStyle;
-NOTICE: DateStyle is ISO with European conventions
+INFO: DateStyle is ISO with European conventions
SELECT '' AS "65", d1 AS european_iso FROM TIMESTAMP_TBL;
65 | european_iso
----+------------------------
SET DateStyle TO 'European,SQL';
SHOW DateStyle;
-NOTICE: DateStyle is SQL with European conventions
+INFO: DateStyle is SQL with European conventions
SELECT '' AS "65", d1 AS european_sql FROM TIMESTAMP_TBL;
65 | european_sql
----+------------------------
CREATE TABLE b (bb TEXT) INHERITS (a);
CREATE TABLE c (cc TEXT) INHERITS (a);
CREATE TABLE d (dd TEXT) INHERITS (b,c,a);
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
-NOTICE: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
+INFO: CREATE TABLE: merging multiple inherited definitions of attribute "aa"
INSERT INTO a(aa) VALUES('aaa');
INSERT INTO a(aa) VALUES('aaaa');
INSERT INTO a(aa) VALUES('aaaaa');