*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.83 2002/09/27 15:04:08 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/heaptuple.c,v 1.84 2003/07/21 20:29:37 tgl Exp $
*
* NOTES
* The old interface functions have been converted to macros
case MinCommandIdAttributeNumber:
case MaxTransactionIdAttributeNumber:
case MaxCommandIdAttributeNumber:
+ /* these are never null */
break;
- case 0:
- elog(ERROR, "heap_attisnull: zero attnum disallowed");
-
default:
- elog(ERROR, "heap_attisnull: undefined negative attnum");
+ elog(ERROR, "invalid attnum: %d", attnum);
}
return 0;
result = ObjectIdGetDatum(tup->t_tableOid);
break;
default:
- elog(ERROR, "heap_getsysattr: invalid attnum %d", attnum);
+ elog(ERROR, "invalid attnum: %d", attnum);
result = 0; /* keep compiler quiet */
break;
}
int numberOfAttributes = tupleDescriptor->natts;
if (numberOfAttributes > MaxTupleAttributeNumber)
- elog(ERROR, "heap_formtuple: numberOfAttributes %d exceeds limit %d",
- numberOfAttributes, MaxTupleAttributeNumber);
+ ereport(ERROR,
+ (errcode(ERRCODE_TOO_MANY_COLUMNS),
+ errmsg("number of attributes %d exceeds limit, %d",
+ numberOfAttributes, MaxTupleAttributeNumber)));
for (i = 0; i < numberOfAttributes; i++)
{
* allocate and fill *value and *nulls arrays from either the tuple or
* the repl information, as appropriate.
*/
- value = (Datum *) palloc(numberOfAttributes * sizeof *value);
- nulls = (char *) palloc(numberOfAttributes * sizeof *nulls);
+ value = (Datum *) palloc(numberOfAttributes * sizeof(Datum));
+ nulls = (char *) palloc(numberOfAttributes * sizeof(char));
- for (attoff = 0;
- attoff < numberOfAttributes;
- attoff += 1)
+ for (attoff = 0; attoff < numberOfAttributes; attoff++)
{
-
if (repl[attoff] == ' ')
{
value[attoff] = heap_getattr(tuple,
nulls[attoff] = (isNull) ? 'n' : ' ';
}
- else if (repl[attoff] != 'r')
- elog(ERROR, "heap_modifytuple: repl is \\%3d", repl[attoff]);
- else
- { /* == 'r' */
+ else if (repl[attoff] == 'r')
+ {
value[attoff] = replValue[attoff];
nulls[attoff] = replNull[attoff];
}
+ else
+ elog(ERROR, "unrecognized replace flag: %d", (int) repl[attoff]);
}
/*
value,
nulls);
+ pfree(value);
+ pfree(nulls);
+
/*
* copy the identification info of the old tuple: t_ctid, t_self, and
* OID (if any)
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.64 2003/02/23 06:17:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/indextuple.c,v 1.65 2003/07/21 20:29:37 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#endif
if (numberOfAttributes > INDEX_MAX_KEYS)
- elog(ERROR, "index_formtuple: numberOfAttributes %d > %d",
- numberOfAttributes, INDEX_MAX_KEYS);
+ ereport(ERROR,
+ (errcode(ERRCODE_TOO_MANY_COLUMNS),
+ errmsg("number of index attributes %d exceeds limit, %d",
+ numberOfAttributes, INDEX_MAX_KEYS)));
#ifdef TOAST_INDEX_HACK
for (i = 0; i < numberOfAttributes; i++)
* it in t_info.
*/
if ((size & INDEX_SIZE_MASK) != size)
- elog(ERROR, "index_formtuple: data takes %lu bytes, max is %d",
- (unsigned long) size, INDEX_SIZE_MASK);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("index tuple requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.74 2003/05/26 17:51:38 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/printtup.c,v 1.75 2003/07/21 20:29:38 tgl Exp $
*
*-------------------------------------------------------------------------
*/
fmgr_info(thisState->typsend, &thisState->finfo);
}
else
- elog(ERROR, "Unsupported format code %d", format);
+ ereport(ERROR,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("unsupported format code: %d", format)));
}
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.95 2003/06/15 17:59:10 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/common/tupdesc.c,v 1.96 2003/07/21 20:29:38 tgl Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
ObjectIdGetDatum(oidtypeid),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
- elog(ERROR, "Unable to look up type id %u", oidtypeid);
+ elog(ERROR, "cache lookup failed for type %u", oidtypeid);
/*
* type info exists so we initialize our attribute information from
int natts;
if (!OidIsValid(relid))
- elog(ERROR, "Invalid typrelid for complex type %u", typeoid);
+ elog(ERROR, "invalid typrelid for complex type %u", typeoid);
rel = relation_open(relid, AccessShareLock);
tupdesc = CreateTupleDescCopy(RelationGetDescr(rel));
/* does the list length match the number of attributes? */
if (length(colaliases) != natts)
- elog(ERROR, "TypeGetTupleDesc: number of aliases does not match number of attributes");
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("number of aliases does not match number of attributes")));
/* OK, use the aliases instead */
for (varattno = 0; varattno < natts; varattno++)
/* the alias list is required for base types */
if (colaliases == NIL)
- elog(ERROR, "TypeGetTupleDesc: no column alias was provided");
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("no column alias was provided")));
/* the alias list length must be 1 */
if (length(colaliases) != 1)
- elog(ERROR, "TypeGetTupleDesc: number of aliases does not match number of attributes");
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("number of aliases does not match number of attributes")));
/* OK, get the column alias */
attname = strVal(lfirst(colaliases));
false);
}
else if (functyptype == 'p' && typeoid == RECORDOID)
- elog(ERROR, "Unable to determine tuple description for function returning \"record\"");
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("unable to determine tuple description for function returning record")));
else
{
/* crummy error message, but parser should have caught this */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.103 2003/05/27 17:49:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/gist/gist.c,v 1.104 2003/07/21 20:29:38 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* that's not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
- elog(ERROR, "%s already contains data",
+ elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* initialize the root page */
retval = PageAddItem(page, (Item) *newtup, IndexTupleSize(*newtup),
offsetNumber, flags);
if (retval == InvalidOffsetNumber)
- elog(ERROR, "gist: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
/* be tidy */
if (DatumGetPointer(tmpcentry.key) != NULL &&
l = PageAddItem(page, (Item) itup[i], IndexTupleSize(itup[i]),
off, LP_USED);
if (l == InvalidOffsetNumber)
- elog(ERROR, "gist: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
#endif
}
int i;
if (index->rd_att->natts > INDEX_MAX_KEYS)
- elog(ERROR, "initGISTstate: numberOfAttributes %d > %d",
+ elog(ERROR, "numberOfAttributes %d > %d",
index->rd_att->natts, INDEX_MAX_KEYS);
giststate->tupdesc = index->rd_att;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.63 2003/03/23 23:01:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hash.c,v 1.64 2003/07/21 20:29:38 tgl Exp $
*
* NOTES
* This file contains only the public interface routines.
* that's not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
- elog(ERROR, "%s already contains data",
+ elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* initialize the hash index metadata page */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.25 2002/06/20 20:29:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashinsert.c,v 1.26 2003/07/21 20:29:38 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/* we need a scan key to do our search, so build one */
itup = &(hitem->hash_itup);
if ((natts = rel->rd_rel->relnatts) != 1)
- elog(ERROR, "Hash indices valid for only one index key.");
+ elog(ERROR, "Hash indexes support only one index key");
itup_scankey = _hash_mkscankey(rel, itup);
/*
itup_off = OffsetNumberNext(PageGetMaxOffsetNumber(page));
if (PageAddItem(page, (Item) hitem, itemsize, itup_off, LP_USED)
== InvalidOffsetNumber)
- elog(ERROR, "_hash_pgaddtup: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(rel));
/* write the buffer, but hold our lock */
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.34 2003/03/10 22:28:18 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashovfl.c,v 1.35 2003/07/21 20:29:38 tgl Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
/* allocate an empty overflow page */
oaddr = _hash_getovfladdr(rel, metabufp);
if (oaddr == InvalidOvflAddress)
- elog(ERROR, "_hash_addovflpage: problem with _hash_getovfladdr.");
+ elog(ERROR, "_hash_getovfladdr failed");
ovflblkno = OADDR_TO_BLKNO(OADDR_OF(SPLITNUM(oaddr), OPAGENUM(oaddr)));
Assert(BlockNumberIsValid(ovflblkno));
ovflbuf = _hash_getbuf(rel, ovflblkno, HASH_WRITE);
offset = metap->hashm_spares[splitnum] -
(splitnum ? metap->hashm_spares[splitnum - 1] : 0);
-#define OVMSG "HASH: Out of overflow pages. Out of luck.\n"
-
if (offset > SPLITMASK)
{
if (++splitnum >= NCACHED)
- elog(ERROR, OVMSG);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
metap->hashm_ovflpoint = splitnum;
metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
metap->hashm_spares[splitnum - 1]--;
free_page++;
if (free_page >= NCACHED)
- elog(ERROR, OVMSG);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
/*
* This is tricky. The 1 indicates that you want the new page
*/
if (_hash_initbitmap(rel, metap, OADDR_OF(splitnum, offset),
1, free_page))
- elog(ERROR, "overflow_page: problem with _hash_initbitmap.");
+ elog(ERROR, "_hash_initbitmap failed");
metap->hashm_spares[splitnum]++;
offset++;
if (offset > SPLITMASK)
{
if (++splitnum >= NCACHED)
- elog(ERROR, OVMSG);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
metap->hashm_ovflpoint = splitnum;
metap->hashm_spares[splitnum] = metap->hashm_spares[splitnum - 1];
metap->hashm_spares[splitnum - 1]--;
;
offset = (i ? bit - metap->hashm_spares[i - 1] : bit);
if (offset >= SPLITMASK)
- elog(ERROR, OVMSG);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("out of overflow pages in hash index \"%s\"",
+ RelationGetRelationName(rel))));
/* initialize this page */
oaddr = OADDR_OF(i, offset);
HashItem hitem;
Size itemsz;
-/* elog(DEBUG, "_hash_squeezebucket: squeezing bucket %d", bucket); */
-
/*
* start squeezing into the base bucket page.
*/
woffnum = OffsetNumberNext(PageGetMaxOffsetNumber(wpage));
if (PageAddItem(wpage, (Item) hitem, itemsz, woffnum, LP_USED)
== InvalidOffsetNumber)
- elog(ERROR, "_hash_squeezebucket: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(rel));
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.36 2002/06/20 20:29:24 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashpage.c,v 1.37 2003/07/21 20:29:38 tgl Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
LockRelation(rel, AccessExclusiveLock);
if (RelationGetNumberOfBlocks(rel) != 0)
- elog(ERROR, "Cannot initialize non-empty hash table %s",
+ elog(ERROR, "cannot initialize non-empty hash index \"%s\"",
RelationGetRelationName(rel));
metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE);
* created the first two buckets above.
*/
if (_hash_initbitmap(rel, metap, OADDR_OF(lg2nelem, 1), lg2nelem + 1, 0))
- elog(ERROR, "Problem with _hash_initbitmap.");
+ elog(ERROR, "_hash_initbitmap failed");
/* all done */
_hash_wrtnorelbuf(metabuf);
Buffer buf;
if (blkno == P_NEW)
- elog(ERROR, "_hash_getbuf: internal error: hash AM does not use P_NEW");
+ elog(ERROR, "hash AM does not use P_NEW");
switch (access)
{
case HASH_WRITE:
_hash_setpagelock(rel, blkno, access);
break;
default:
- elog(ERROR, "_hash_getbuf: invalid access (%d) on new blk: %s",
- access, RelationGetRelationName(rel));
+ elog(ERROR, "unrecognized hash access code: %d", access);
break;
}
buf = ReadBuffer(rel, blkno);
_hash_unsetpagelock(rel, blkno, access);
break;
default:
- elog(ERROR, "_hash_relbuf: invalid access (%d) on blk %x: %s",
- access, blkno, RelationGetRelationName(rel));
+ elog(ERROR, "unrecognized hash access code: %d", access);
+ break;
}
ReleaseBuffer(buf);
_hash_relbuf(rel, *bufp, from_access);
break;
default:
- elog(ERROR, "_hash_chgbufaccess: invalid access (%d) on blk %x: %s",
- from_access, blkno, RelationGetRelationName(rel));
+ elog(ERROR, "unrecognized hash access code: %d", from_access);
break;
}
*bufp = _hash_getbuf(rel, blkno, to_access);
LockPage(rel, blkno, ShareLock);
break;
default:
- elog(ERROR, "_hash_setpagelock: invalid access (%d) on blk %x: %s",
- access, blkno, RelationGetRelationName(rel));
+ elog(ERROR, "unrecognized hash access code: %d", access);
break;
}
}
UnlockPage(rel, blkno, ShareLock);
break;
default:
- elog(ERROR, "_hash_unsetpagelock: invalid access (%d) on blk %x: %s",
- access, blkno, RelationGetRelationName(rel));
+ elog(ERROR, "unrecognized hash access code: %d", access);
break;
}
}
Bucket new_bucket;
uint32 spare_ndx;
-/* elog(DEBUG, "_hash_expandtable: expanding..."); */
-
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
Page npage;
TupleDesc itupdesc;
-/* elog(DEBUG, "_hash_splitpage: splitting %d into %d,%d",
- obucket, obucket, nbucket);
-*/
metap = (HashMetaPage) BufferGetPage(metabuf);
_hash_checkpage((Page) metap, LH_META_PAGE);
opage = BufferGetPage(obuf);
_hash_checkpage(opage, LH_OVERFLOW_PAGE);
if (PageIsEmpty(opage))
- elog(ERROR, "_hash_splitpage: empty overflow page %d", oblkno);
+ elog(ERROR, "empty hash overflow page %u", oblkno);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
}
opage = BufferGetPage(obuf);
_hash_checkpage(opage, LH_OVERFLOW_PAGE);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
-
/* we're guaranteed that an ovfl page has at least 1 tuple */
if (PageIsEmpty(opage))
- {
- elog(ERROR, "_hash_splitpage: empty ovfl page %d!",
- oblkno);
- }
+ elog(ERROR, "empty hash overflow page %u", oblkno);
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
}
noffnum = OffsetNumberNext(PageGetMaxOffsetNumber(npage));
if (PageAddItem(npage, (Item) hitem, itemsz, noffnum, LP_USED)
== InvalidOffsetNumber)
- elog(ERROR, "_hash_splitpage: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(rel));
_hash_wrtnorelbuf(nbuf);
oblkno = BufferGetBlockNumber(obuf);
oopaque = (HashPageOpaque) PageGetSpecialPointer(opage);
if (PageIsEmpty(opage))
- {
- elog(ERROR, "_hash_splitpage: empty overflow page %d",
- oblkno);
- }
+ elog(ERROR, "empty hash overflow page %u", oblkno);
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.31 2002/07/02 06:18:57 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/hash/hashutil.c,v 1.32 2003/07/21 20:29:38 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/* disallow nulls in hash keys */
if (IndexTupleHasNulls(itup))
- elog(ERROR, "hash indices cannot include null keys");
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("hash indexes cannot include null keys")));
/* make a copy of the index tuple with room for the sequence number */
tuplen = IndexTupleSize(itup);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.151 2003/02/23 20:32:11 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.152 2003/07/21 20:29:38 tgl Exp $
*
*
* INTERFACE ROUTINES
*/
#ifdef HEAPDEBUGALL
if (ItemPointerIsValid(tid))
- {
- elog(LOG, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
+ elog(DEBUG2, "heapgettup(%s, tid=0x%x[%d,%d], dir=%d, ...)",
RelationGetRelationName(relation), tid, tid->ip_blkid,
tid->ip_posid, dir);
- }
else
- {
- elog(LOG, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
+ elog(DEBUG2, "heapgettup(%s, tid=0x%x, dir=%d, ...)",
RelationGetRelationName(relation), tid, dir);
- }
- elog(LOG, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
- elog(LOG, "heapgettup: relation(%c)=`%s', %p",
+ elog(DEBUG2, "heapgettup(..., b=0x%x, nkeys=%d, key=0x%x", buffer, nkeys, key);
+
+ elog(DEBUG2, "heapgettup: relation(%c)=`%s', %p",
relation->rd_rel->relkind, RelationGetRelationName(relation),
snapshot);
#endif /* !defined(HEAPLOGALL) */
relation,
ItemPointerGetBlockNumber(tid));
if (!BufferIsValid(*buffer))
- elog(ERROR, "heapgettup: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(*buffer, BUFFER_LOCK_SHARE);
relation,
page);
if (!BufferIsValid(*buffer))
- elog(ERROR, "heapgettup: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(*buffer, BUFFER_LOCK_SHARE);
relation,
page);
if (!BufferIsValid(*buffer))
- elog(ERROR, "heapgettup: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(*buffer, BUFFER_LOCK_SHARE);
relation,
page);
if (!BufferIsValid(*buffer))
- elog(ERROR, "heapgettup: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(*buffer, BUFFER_LOCK_SHARE);
dp = (Page) BufferGetPage(*buffer);
r = RelationIdGetRelation(relationId);
if (!RelationIsValid(r))
- elog(ERROR, "Relation %u does not exist", relationId);
+ elog(ERROR, "could not open relation with OID %u", relationId);
if (lockmode != NoLock)
LockRelation(r, lockmode);
r = RelationSysNameGetRelation(sysRelationName);
if (!RelationIsValid(r))
- elog(ERROR, "Relation \"%s\" does not exist", sysRelationName);
+ elog(ERROR, "could not open relation \"%s\"", sysRelationName);
if (lockmode != NoLock)
LockRelation(r, lockmode);
r = relation_open(relationId, lockmode);
if (r->rd_rel->relkind == RELKIND_INDEX)
- elog(ERROR, "%s is an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is an index relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_SPECIAL)
- elog(ERROR, "%s is a special relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a special relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
- elog(ERROR, "%s is a composite type",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a composite type",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
r = relation_openrv(relation, lockmode);
if (r->rd_rel->relkind == RELKIND_INDEX)
- elog(ERROR, "%s is an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is an index relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_SPECIAL)
- elog(ERROR, "%s is a special relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a special relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
- elog(ERROR, "%s is a composite type",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a composite type",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
r = relation_openr(sysRelationName, lockmode);
if (r->rd_rel->relkind == RELKIND_INDEX)
- elog(ERROR, "%s is an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is an index relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_SPECIAL)
- elog(ERROR, "%s is a special relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a special relation",
+ RelationGetRelationName(r))));
else if (r->rd_rel->relkind == RELKIND_COMPOSITE_TYPE)
- elog(ERROR, "%s is a composite type",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is a composite type",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
{
HeapScanDesc scan;
- /*
- * sanity checks
- */
- if (!RelationIsValid(relation))
- elog(ERROR, "heap_beginscan: !RelationIsValid(relation)");
-
/*
* increment relation ref count while scanning relation
*
#ifdef HEAPDEBUGALL
#define HEAPDEBUG_1 \
- elog(LOG, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
+ elog(DEBUG2, "heap_getnext([%s,nkeys=%d],dir=%d) called", \
RelationGetRelationName(scan->rs_rd), scan->rs_nkeys, (int) direction)
-
#define HEAPDEBUG_2 \
- elog(LOG, "heap_getnext returning EOS")
-
+ elog(DEBUG2, "heap_getnext returning EOS")
#define HEAPDEBUG_3 \
- elog(LOG, "heap_getnext returning tuple")
+ elog(DEBUG2, "heap_getnext returning tuple")
#else
#define HEAPDEBUG_1
#define HEAPDEBUG_2
{
/* Note: no locking manipulations needed */
- /*
- * argument checks
- */
- if (scan == NULL)
- elog(ERROR, "heap_getnext: NULL relscan");
-
HEAPDEBUG_1; /* heap_getnext( info ) */
/*
* the tuple); when keep_buf = false, the pin is released and *userbuf is set
* to InvalidBuffer.
*
- * It is somewhat inconsistent that we elog() on invalid block number but
+ * It is somewhat inconsistent that we ereport() on invalid block number but
* return false on invalid item number. This is historical. The only
* justification I can see is that the caller can relatively easily check the
* block number for validity, but cannot check the item number without reading
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
if (!BufferIsValid(buffer))
- elog(ERROR, "heap_fetch: ReadBuffer(%s, %lu) failed",
+ elog(ERROR, "ReadBuffer(\"%s\", %lu) failed",
RelationGetRelationName(relation),
(unsigned long) ItemPointerGetBlockNumber(tid));
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
if (!BufferIsValid(buffer))
- elog(ERROR, "heap_get_latest_tid: %s relation: ReadBuffer(%lx) failed",
- RelationGetRelationName(relation), (long) tid);
+ elog(ERROR, "ReadBuffer(\"%s\", %lu) failed",
+ RelationGetRelationName(relation),
+ (unsigned long) ItemPointerGetBlockNumber(tid));
LockBuffer(buffer, BUFFER_LOCK_SHARE);
/* Find buffer to insert this tuple into */
buffer = RelationGetBufferForTuple(relation, tup->t_len, InvalidBuffer);
- /* NO ELOG(ERROR) from here till changes are logged */
+ /* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
RelationPutHeapTuple(relation, buffer, tup);
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
if (!BufferIsValid(buffer))
- elog(ERROR, "heap_delete: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
- elog(ERROR, "heap_delete: (am)invalid tid");
+ elog(ERROR, "attempted to delete invisible tuple");
}
else if (result == HeapTupleBeingUpdated)
{
* This routine may be used to delete a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
- * via elog().
+ * via ereport().
*/
void
simple_heap_delete(Relation relation, ItemPointer tid)
{
case HeapTupleSelfUpdated:
/* Tuple was already updated in current command? */
- elog(ERROR, "simple_heap_delete: tuple already updated by self");
+ elog(ERROR, "tuple already updated by self");
break;
case HeapTupleMayBeUpdated:
break;
case HeapTupleUpdated:
- elog(ERROR, "simple_heap_delete: tuple concurrently updated");
+ elog(ERROR, "tuple concurrently updated");
break;
default:
- elog(ERROR, "Unknown status %u from heap_delete", result);
+ elog(ERROR, "unrecognized heap_delete status: %u", result);
break;
}
}
buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(otid));
if (!BufferIsValid(buffer))
- elog(ERROR, "heap_update: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
dp = (PageHeader) BufferGetPage(buffer);
{
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(buffer);
- elog(ERROR, "heap_update: (am)invalid tid");
+ elog(ERROR, "attempted to update invisible tuple");
}
else if (result == HeapTupleBeingUpdated)
{
* buffer, only one pin is held.
*/
- /* NO ELOG(ERROR) from here till changes are logged */
+ /* NO EREPORT(ERROR) from here till changes are logged */
START_CRIT_SECTION();
RelationPutHeapTuple(relation, newbuf, newtup); /* insert new tuple */
* This routine may be used to update a tuple when concurrent updates of
* the target tuple are not expected (for example, because we have a lock
* on the relation associated with the tuple). Any failure is reported
- * via elog().
+ * via ereport().
*/
void
simple_heap_update(Relation relation, ItemPointer otid, HeapTuple tup)
{
case HeapTupleSelfUpdated:
/* Tuple was already updated in current command? */
- elog(ERROR, "simple_heap_update: tuple already updated by self");
+ elog(ERROR, "tuple already updated by self");
break;
case HeapTupleMayBeUpdated:
break;
case HeapTupleUpdated:
- elog(ERROR, "simple_heap_update: tuple concurrently updated");
+ elog(ERROR, "tuple concurrently updated");
break;
default:
- elog(ERROR, "Unknown status %u from heap_update", result);
+ elog(ERROR, "unrecognized heap_update status: %u", result);
break;
}
}
*buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
if (!BufferIsValid(*buffer))
- elog(ERROR, "heap_mark4update: failed ReadBuffer");
+ elog(ERROR, "ReadBuffer failed");
LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
{
LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
ReleaseBuffer(*buffer);
- elog(ERROR, "heap_mark4update: (am)invalid tid");
+ elog(ERROR, "attempted to mark4update invisible tuple");
}
else if (result == HeapTupleBeingUpdated)
{
*
*
* IDENTIFICATION
- * $Id: hio.c,v 1.47 2003/02/13 05:35:11 momjian Exp $
+ * $Id: hio.c,v 1.48 2003/07/21 20:29:38 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/*
* RelationPutHeapTuple - place tuple at specified page
*
- * !!! ELOG(ERROR) IS DISALLOWED HERE !!!
+ * !!! EREPORT(ERROR) IS DISALLOWED HERE !!! Must PANIC on failure!!!
*
* Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
*/
tuple->t_len, InvalidOffsetNumber, LP_USED);
if (offnum == InvalidOffsetNumber)
- elog(PANIC, "RelationPutHeapTuple: failed to add tuple");
+ elog(PANIC, "failed to add tuple to page");
/* Update tuple->t_self to the actual position where it was stored */
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
* for indices only. Alternatively, we could define pseudo-table as
* we do for transactions with XactLockTable.
*
- * ELOG(ERROR) is allowed here, so this routine *must* be called
+ * ereport(ERROR) is allowed here, so this routine *must* be called
* before any (unlogged) changes are made in buffer pool.
*/
Buffer
* If we're gonna fail for oversize tuple, do it right away
*/
if (len > MaxTupleSize)
- elog(ERROR, "Tuple is too big: size %lu, max size %ld",
- (unsigned long) len, MaxTupleSize);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("tuple is too big: size %lu, maximum size %lu",
+ (unsigned long) len,
+ (unsigned long) MaxTupleSize)));
if (otherBuffer != InvalidBuffer)
otherBlock = BufferGetBlockNumber(otherBuffer);
if (len > PageGetFreeSpace(pageHeader))
{
/* We should not get here given the test at the top */
- elog(PANIC, "Tuple is too big: size %lu", (unsigned long) len);
+ elog(PANIC, "tuple is too big: size %lu", (unsigned long) len);
}
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.36 2002/09/04 20:31:09 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v 1.37 2003/07/21 20:29:39 tgl Exp $
*
*
* INTERFACE ROUTINES
memcpy(VARATT_DATA(&chunk_data), data_p, chunk_size);
toasttup = heap_formtuple(toasttupDesc, t_values, t_nulls);
if (!HeapTupleIsValid(toasttup))
- elog(ERROR, "Failed to build TOAST tuple");
+ elog(ERROR, "failed to build TOAST tuple");
simple_heap_insert(toastrel, toasttup);
&(toasttup->t_self),
toastrel, toastidx->rd_index->indisunique);
if (idxres == NULL)
- elog(ERROR, "Failed to insert index entry for TOAST tuple");
+ elog(ERROR, "failed to insert index entry for TOAST tuple");
/*
* Free memory
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.38 2003/03/24 21:42:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/genam.c,v 1.39 2003/07/21 20:29:39 tgl Exp $
*
* NOTES
* many of the old access method routines have been turned into
{
IndexScanDesc scan;
- if (!RelationIsValid(indexRelation))
- elog(ERROR, "RelationGetIndexScan: relation invalid");
-
scan = (IndexScanDesc) palloc(sizeof(IndexScanDescData));
scan->heapRelation = NULL; /* may be set later */
void
IndexScanEnd(IndexScanDesc scan)
{
- if (!IndexScanIsValid(scan))
- elog(ERROR, "IndexScanEnd: invalid scan");
-
if (scan->keyData != NULL)
pfree(scan->keyData);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.66 2003/03/24 21:42:33 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/indexam.c,v 1.67 2003/07/21 20:29:39 tgl Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
procedure = indexRelation->rd_am->y, \
(!RegProcedureIsValid(procedure)) ? \
elog(ERROR, "index_%s: invalid %s regproc", \
- CppAsString(x), CppAsString(y)) \
+ CppAsString(x), CppAsString(y)) \
: (void)NULL \
)
procedure = scan->indexRelation->rd_am->y, \
(!RegProcedureIsValid(procedure)) ? \
elog(ERROR, "index_%s: invalid %s regproc", \
- CppAsString(x), CppAsString(y)) \
+ CppAsString(x), CppAsString(y)) \
: (void)NULL \
)
r = relation_open(relationId, NoLock);
if (r->rd_rel->relkind != RELKIND_INDEX)
- elog(ERROR, "%s is not an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not an index relation",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
r = relation_openrv(relation, NoLock);
if (r->rd_rel->relkind != RELKIND_INDEX)
- elog(ERROR, "%s is not an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not an index relation",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
r = relation_openr(sysRelationName, NoLock);
if (r->rd_rel->relkind != RELKIND_INDEX)
- elog(ERROR, "%s is not an index relation",
- RelationGetRelationName(r));
+ ereport(ERROR,
+ (errcode(ERRCODE_WRONG_OBJECT_TYPE),
+ errmsg("\"%s\" is not an index relation",
+ RelationGetRelationName(r))));
pgstat_initstats(&r->pgstat_info, r);
* use index_getprocid.)
*/
if (!RegProcedureIsValid(procId))
- elog(ERROR, "Missing support function %d for attribute %d of index %s",
+ elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
procnum, attnum, RelationGetRelationName(irel));
fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.58 2002/06/20 20:29:25 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/index/Attic/istrat.c,v 1.59 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
break;
default:
- elog(ERROR, "StrategyTermEvaluate: impossible case %d",
+ elog(ERROR, "impossible strategy case: %d",
operator->flags ^ entry->sk_flags);
}
if (!result)
break;
default:
- elog(FATAL, "RelationGetStrategy: impossible case %d", entry->sk_flags);
+ elog(ERROR, "impossible strategy case: %d",
+ entry->sk_flags);
}
if (!StrategyNumberIsInBounds(strategy, evaluation->maxStrategy))
{
if (!StrategyNumberIsValid(strategy))
- elog(ERROR, "RelationGetStrategy: corrupted evaluation");
+ elog(ERROR, "corrupted strategy evaluation");
}
return strategy;
}
}
- elog(ERROR, "RelationInvokeStrategy: cannot evaluate strategy %d",
- strategy);
+ elog(ERROR, "cannot evaluate strategy %d", strategy);
/* not reached, just to make compiler happy */
return FALSE;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.100 2003/05/27 17:49:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.101 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
*
* Returns InvalidTransactionId if there is no conflict, else an xact ID
* we must wait for to see if it commits a conflicting tuple. If an actual
- * conflict is detected, no return --- just elog().
+ * conflict is detected, no return --- just ereport().
*/
static TransactionId
_bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* Otherwise we have a definite conflict.
*/
- elog(ERROR, "Cannot insert a duplicate key into unique index %s",
- RelationGetRelationName(rel));
+ ereport(ERROR,
+ (errcode(ERRCODE_UNIQUE_VIOLATION),
+ errmsg("duplicate key violates UNIQUE constraint \"%s\"",
+ RelationGetRelationName(rel))));
}
else if (htup.t_data != NULL)
{
if (!P_IGNORE(opaque))
break;
if (P_RIGHTMOST(opaque))
- elog(ERROR, "_bt_check_unique: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
}
maxoff = PageGetMaxOffsetNumber(page);
* itemsz doesn't include the ItemId.
*/
if (itemsz > BTMaxItemSize(page))
- elog(ERROR, "btree: index item size %lu exceeds maximum %lu",
- (unsigned long) itemsz, BTMaxItemSize(page));
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("index tuple size %lu exceeds btree maximum, %lu",
+ (unsigned long) itemsz,
+ (unsigned long) BTMaxItemSize(page))));
/*
* Determine exactly where new item will go.
if (!P_IGNORE(lpageop))
break;
if (P_RIGHTMOST(lpageop))
- elog(ERROR, "_bt_insertonpg: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
}
_bt_relbuf(rel, buf);
}
}
- /* Do the actual update. No elog(ERROR) until changes are logged */
+ /* Do the update. No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
_bt_pgaddtup(rel, page, itemsz, btitem, newitemoff, "page");
item = (BTItem) PageGetItem(origpage, itemid);
if (PageAddItem(rightpage, (Item) item, itemsz, rightoff,
LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "btree: failed to add hikey to the right sibling");
+ elog(PANIC, "failed to add hikey to the right sibling");
rightoff = OffsetNumberNext(rightoff);
}
}
if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "btree: failed to add hikey to the left sibling");
+ elog(PANIC, "failed to add hikey to the left sibling");
leftoff = OffsetNumberNext(leftoff);
/*
spage = BufferGetPage(sbuf);
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
if (sopaque->btpo_prev != ropaque->btpo_prev)
- elog(PANIC, "btree: right sibling's left-link doesn't match");
+ elog(PANIC, "right sibling's left-link doesn't match");
}
/*
* Right sibling is locked, new siblings are prepared, but original
* page is not updated yet. Log changes before continuing.
*
- * NO ELOG(ERROR) till right sibling is updated.
+ * NO EREPORT(ERROR) till right sibling is updated.
*/
START_CRIT_SECTION();
* just in case ...
*/
if (!state.have_split)
- elog(FATAL, "_bt_findsplitloc: can't find a feasible split point for %s",
+ elog(ERROR, "cannot find a feasible split point for \"%s\"",
RelationGetRelationName(rel));
*newitemonleft = state.newitemonleft;
BTPageOpaque lpageop;
if (!InRecovery)
- elog(DEBUG2, "_bt_insert_parent: concurrent ROOT page split");
+ elog(DEBUG2, "concurrent ROOT page split");
lpageop = (BTPageOpaque) PageGetSpecialPointer(page);
/* Find the leftmost page at the next level up */
pbuf = _bt_get_endpoint(rel, lpageop->btpo.level + 1, false);
/* Check for error only after writing children */
if (pbuf == InvalidBuffer)
- elog(ERROR, "_bt_getstackbuf: my bits moved right off the end of the world!"
- "\n\tRecreate index %s.", RelationGetRelationName(rel));
+ elog(ERROR, "failed to re-find parent key in \"%s\"",
+ RelationGetRelationName(rel));
/* Recursively update the parent */
newres = _bt_insertonpg(rel, pbuf, stack->bts_parent,
metapg = BufferGetPage(metabuf);
metad = BTPageGetMeta(metapg);
- /* NO ELOG(ERROR) from here till newroot op is logged */
+ /* NO EREPORT(ERROR) from here till newroot op is logged */
START_CRIT_SECTION();
/* set btree special data */
* the two items will go into positions P_HIKEY and P_FIRSTKEY.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_HIKEY, LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "btree: failed to add leftkey to new root page");
+ elog(PANIC, "failed to add leftkey to new root page");
pfree(new_item);
/*
* insert the right page pointer into the new root page.
*/
if (PageAddItem(rootpage, (Item) new_item, itemsz, P_FIRSTKEY, LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "btree: failed to add rightkey to new root page");
+ elog(PANIC, "failed to add rightkey to new root page");
pfree(new_item);
/* XLOG stuff */
if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
LP_USED) == InvalidOffsetNumber)
- elog(PANIC, "btree: failed to add item to the %s for %s",
+ elog(PANIC, "failed to add item to the %s for \"%s\"",
where, RelationGetRelationName(rel));
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.65 2003/05/27 17:49:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtpage.c,v 1.66 2003/07/21 20:29:39 tgl Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
BTPageOpaque op;
if (RelationGetNumberOfBlocks(rel) != 0)
- elog(ERROR, "Cannot initialize non-empty btree %s",
+ elog(ERROR, "cannot initialize non-empty btree index \"%s\"",
RelationGetRelationName(rel));
buf = ReadBuffer(rel, P_NEW);
/* sanity-check the metapage */
if (!(metaopaque->btpo_flags & BTP_META) ||
metad->btm_magic != BTREE_MAGIC)
- elog(ERROR, "Index %s is not a btree",
- RelationGetRelationName(rel));
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("index \"%s\" is not a btree",
+ RelationGetRelationName(rel))));
if (metad->btm_version != BTREE_VERSION)
- elog(ERROR, "Version mismatch on %s: version %d file, version %d code",
- RelationGetRelationName(rel),
- metad->btm_version, BTREE_VERSION);
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("version mismatch in \"%s\": file version %d, code version %d",
+ RelationGetRelationName(rel),
+ metad->btm_version, BTREE_VERSION)));
/* if no root page initialized yet, do it */
if (metad->btm_root == P_NONE)
/* it's dead, Jim. step right one page */
if (P_RIGHTMOST(rootopaque))
- elog(ERROR, "No live root page found in %s",
+ elog(ERROR, "no live root page found in \"%s\"",
RelationGetRelationName(rel));
rootblkno = rootopaque->btpo_next;
/* Note: can't check btpo.level on deleted pages */
if (rootopaque->btpo.level != rootlevel)
- elog(ERROR, "Root page %u of %s has level %u, expected %u",
+ elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
rootblkno, RelationGetRelationName(rel),
rootopaque->btpo.level, rootlevel);
}
if (!(metaopaque->btpo_flags & BTP_META) ||
metad->btm_magic != BTREE_MAGIC)
- elog(ERROR, "Index %s is not a btree",
- RelationGetRelationName(rel));
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("index \"%s\" is not a btree",
+ RelationGetRelationName(rel))));
if (metad->btm_version != BTREE_VERSION)
- elog(ERROR, "Version mismatch on %s: version %d file, version %d code",
- RelationGetRelationName(rel),
- metad->btm_version, BTREE_VERSION);
+ ereport(ERROR,
+ (errcode(ERRCODE_INDEX_CORRUPTED),
+ errmsg("version mismatch in \"%s\": file version %d, code version %d",
+ RelationGetRelationName(rel),
+ metad->btm_version, BTREE_VERSION)));
/* if no root page initialized yet, fail */
if (metad->btm_root == P_NONE)
/* it's dead, Jim. step right one page */
if (P_RIGHTMOST(rootopaque))
- elog(ERROR, "No live root page found in %s",
+ elog(ERROR, "no live root page found in \"%s\"",
RelationGetRelationName(rel));
rootblkno = rootopaque->btpo_next;
/* Note: can't check btpo.level on deleted pages */
if (rootopaque->btpo.level != rootlevel)
- elog(ERROR, "Root page %u of %s has level %u, expected %u",
+ elog(ERROR, "root page %u of \"%s\" has level %u, expected %u",
rootblkno, RelationGetRelationName(rel),
rootopaque->btpo.level, rootlevel);
_bt_pageinit(page, BufferGetPageSize(buf));
return buf;
}
- elog(DEBUG2, "_bt_getbuf: FSM returned nonrecyclable page");
+ elog(DEBUG2, "FSM returned nonrecyclable page");
_bt_relbuf(rel, buf);
}
Page page = BufferGetPage(buf);
int i;
- /* No elog(ERROR) until changes are logged */
+ /* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/*
for (;;)
{
if (stack == NULL)
- elog(ERROR, "_bt_pagedel: not enough stack items");
+ elog(ERROR, "not enough stack items");
if (ilevel == targetlevel)
break;
stack = stack->bts_parent;
_bt_relbuf(rel, lbuf);
if (leftsib == P_NONE)
{
- elog(LOG, "_bt_pagedel: no left sibling (concurrent deletion?)");
+ elog(LOG, "no left sibling (concurrent deletion?)");
return 0;
}
lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
return 0;
}
if (opaque->btpo_prev != leftsib)
- elog(ERROR, "_bt_pagedel: left link changed unexpectedly");
+ elog(ERROR, "left link changed unexpectedly");
/*
* And next write-lock the (current) right sibling.
*/
target, P_HIKEY);
pbuf = _bt_getstackbuf(rel, stack, BT_WRITE);
if (pbuf == InvalidBuffer)
- elog(ERROR, "_bt_getstackbuf: my bits moved right off the end of the world!"
- "\n\tRecreate index %s.", RelationGetRelationName(rel));
+ elog(ERROR, "failed to re-find parent key in \"%s\"",
+ RelationGetRelationName(rel));
parent = stack->bts_blkno;
poffset = stack->bts_offset;
/*
* Here we begin doing the deletion.
*/
- /* No elog(ERROR) until changes are logged */
+ /* No ereport(ERROR) until changes are logged */
START_CRIT_SECTION();
/*
itemid = PageGetItemId(page, nextoffset);
btitem = (BTItem) PageGetItem(page, itemid);
if (ItemPointerGetBlockNumber(&(btitem->bti_itup.t_tid)) != rightsib)
- elog(PANIC, "_bt_pagedel: right sibling is not next child");
+ elog(PANIC, "right sibling is not next child");
PageIndexTupleDelete(page, nextoffset);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.102 2003/03/23 23:01:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtree.c,v 1.103 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* that's not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
- elog(ERROR, "%s already contains data",
+ elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* initialize the btree index metadata page */
*/
i = FlushRelationBuffers(rel, new_pages);
if (i < 0)
- elog(ERROR, "btvacuumcleanup: FlushRelationBuffers returned %d",
- i);
+ elog(ERROR, "FlushRelationBuffers returned %d", i);
/*
* Do the physical truncation.
* we can find it again.
*/
if (P_RIGHTMOST(opaque))
- elog(ERROR, "_bt_restscan: my bits moved right off the end of the world!"
- "\n\tRecreate index %s.", RelationGetRelationName(rel));
+ elog(ERROR, "failed to re-find previous key in \"%s\"",
+ RelationGetRelationName(rel));
/* Advance to next non-dead page --- there must be one */
nextbuf = InvalidBuffer;
for (;;)
if (!P_IGNORE(opaque))
break;
if (P_RIGHTMOST(opaque))
- elog(ERROR, "_bt_restscan: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
}
_bt_relbuf(rel, buf);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.74 2003/02/22 00:45:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.75 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
}
if (P_IGNORE(opaque))
- elog(ERROR, "_bt_moveright: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
return buf;
{
pfree(nKeyIs);
pfree(scankeys);
- elog(ERROR, "_bt_first: btree doesn't support is(not)null, yet");
+ elog(ERROR, "btree doesn't support is(not)null, yet");
return false;
}
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
result = _bt_compare(rel, keysCount, scankeys, page, offnum);
} while (result == 0);
if (!_bt_step(scan, &buf, BackwardScanDirection))
- elog(ERROR, "_bt_first: equal items disappeared?");
+ elog(ERROR, "equal items disappeared?");
}
break;
for (;;)
{
if (P_RIGHTMOST(opaque))
- elog(ERROR, "_bt_walk_left: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
blkno = opaque->btpo_next;
_bt_relbuf(rel, buf);
* if there's anything wrong.
*/
if (opaque->btpo_prev == lblkno)
- elog(ERROR, "_bt_walk_left: can't find left sibling in %s",
+ elog(ERROR, "cannot find left sibling in \"%s\"",
RelationGetRelationName(rel));
/* Okay to try again with new lblkno value */
}
* _bt_get_endpoint() -- Find the first or last page on a given tree level
*
* If the index is empty, we will return InvalidBuffer; any other failure
- * condition causes elog(). We will not return a dead page.
+ * condition causes ereport(). We will not return a dead page.
*
* The returned buffer is pinned and read-locked.
*/
{
blkno = opaque->btpo_next;
if (blkno == P_NONE)
- elog(ERROR, "_bt_get_endpoint: fell off the end of %s",
+ elog(ERROR, "fell off the end of \"%s\"",
RelationGetRelationName(rel));
_bt_relbuf(rel, buf);
buf = _bt_getbuf(rel, blkno, BT_READ);
if (opaque->btpo.level == level)
break;
if (opaque->btpo.level < level)
- elog(ERROR, "_bt_get_endpoint: btree level %u not found", level);
+ elog(ERROR, "btree level %u not found", level);
/* Descend to leftmost or rightmost child page */
if (rightmost)
}
else
{
- elog(ERROR, "Illegal scan direction %d", dir);
+ elog(ERROR, "invalid scan direction: %d", (int) dir);
start = 0; /* keep compiler quiet */
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.72 2003/02/22 00:45:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtsort.c,v 1.73 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (PageAddItem(page, (Item) btitem, itemsize, itup_off,
LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "btree: failed to add item to the page in _bt_sort");
+ elog(ERROR, "failed to add item to the index page");
}
/*----------
* during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
- elog(ERROR, "btree: index item size %lu exceeds maximum %ld",
- (unsigned long) btisz, BTMaxItemSize(npage));
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("index tuple size %lu exceeds btree maximum, %lu",
+ (unsigned long) btisz,
+ (unsigned long) BTMaxItemSize(npage))));
if (pgspc < btisz || pgspc < state->btps_full)
{
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.51 2002/09/04 20:31:12 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/nbtree/nbtutils.c,v 1.52 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
cur = &key[0];
/* check input keys are correctly ordered */
if (cur->sk_attno != 1)
- elog(ERROR, "_bt_orderkeys: key(s) for attribute 1 missed");
+ elog(ERROR, "key(s) for attribute 1 missed");
/* We can short-circuit most of the work if there's just one key */
if (numberOfKeys == 1)
/* check input keys are correctly ordered */
if (i < numberOfKeys && cur->sk_attno != attno + 1)
- elog(ERROR, "_bt_orderkeys: key(s) for attribute %d missed",
- attno + 1);
+ elog(ERROR, "key(s) for attribute %d missed", attno + 1);
/*
* If = has been specified, no other key will be used. In case
if (sk_procedure == map->entry[j].sk_procedure)
return j;
}
- elog(ERROR, "_bt_getstrategynumber: unable to identify operator %u",
- sk_procedure);
+ elog(ERROR, "unable to identify operator %u", sk_procedure);
return -1; /* keep compiler quiet */
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.77 2003/02/24 00:57:17 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtree.c,v 1.78 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* that's not the case, big trouble's what we have.
*/
if (RelationGetNumberOfBlocks(index) != 0)
- elog(ERROR, "%s already contains data",
+ elog(ERROR, "index \"%s\" already contains data",
RelationGetRelationName(index));
/* initialize the root page */
LP_USED);
}
if (l == InvalidOffsetNumber)
- elog(ERROR, "rtdoinsert: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
WriteBuffer(buffer);
if (PageAddItem(left, (Item) item, IndexTupleSize(item),
leftoff, LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "rtdosplit: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
leftoff = OffsetNumberNext(leftoff);
if (PageAddItem(right, (Item) item, IndexTupleSize(item),
rightoff, LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "rtdosplit: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
rightoff = OffsetNumberNext(rightoff);
*/
if (IndexTupleSize(old) != IndexTupleSize(ltup))
- elog(ERROR, "Variable-length rtree keys are not supported.");
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("variable-length rtree keys are not supported")));
/* install pointer to left child */
memmove(old, ltup, IndexTupleSize(ltup));
if (PageAddItem(p, (Item) rtup, IndexTupleSize(rtup),
PageGetMaxOffsetNumber(p),
LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "rtintinsert: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
WriteBuffer(b);
ldatum = IndexTupleGetDatum(ltup);
if (PageAddItem(p, (Item) lt, IndexTupleSize(lt),
FirstOffsetNumber,
LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "rtnewroot: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
if (PageAddItem(p, (Item) rt, IndexTupleSize(rt),
OffsetNumberNext(FirstOffsetNumber),
LP_USED) == InvalidOffsetNumber)
- elog(ERROR, "rtnewroot: failed to add index item to %s",
+ elog(ERROR, "failed to add index item to \"%s\"",
RelationGetRelationName(r));
WriteBuffer(b);
}
*/
newitemsz = IndexTupleTotalSize(itup);
if (newitemsz > RTPageAvailSpace)
- elog(ERROR, "rtree: index item size %lu exceeds maximum %lu",
- (unsigned long) newitemsz, (unsigned long) RTPageAvailSpace);
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("index tuple size %lu exceeds rtree maximum, %lu",
+ (unsigned long) newitemsz,
+ (unsigned long) RTPageAvailSpace)));
maxoff = PageGetMaxOffsetNumber(page);
newitemoff = OffsetNumberNext(maxoff); /* phony index for new
choose_left = false;
else
{
- elog(ERROR, "rtpicksplit: failed to find a workable page split");
+ elog(ERROR, "failed to find a workable rtree page split");
choose_left = false; /* keep compiler quiet */
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.43 2003/03/23 23:01:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/rtree/Attic/rtscan.c,v 1.44 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
break;
default:
- elog(ERROR, "Bad operation in rtree scan adjust: %d", op);
+ elog(ERROR, "unrecognized operation in rtree scan adjust: %d", op);
}
}
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.148 2003/05/14 03:26:00 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xact.c,v 1.149 2003/07/21 20:29:39 tgl Exp $
*
* NOTES
* Transaction aborts can now occur two ways:
s->commandId += 1;
if (s->commandId == FirstCommandId) /* check for overflow */
- elog(ERROR, "You may only have 2^32-1 commands per transaction");
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("cannot have more than 2^32-1 commands in a transaction")));
/* Propagate new command ID into query snapshots, if set */
if (QuerySnapshot)
* RecordTransactionCommit ...
*/
if (TransactionIdDidCommit(xid))
- elog(PANIC, "RecordTransactionAbort: xact %u already committed",
- xid);
+ elog(PANIC, "cannot abort transaction %u, it was already committed", xid);
START_CRIT_SECTION();
* xact block already started?
*/
if (IsTransactionBlock())
- {
- /* translator: %s represents an SQL statement name */
- elog(ERROR, "%s cannot run inside a transaction block", stmtType);
- }
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot run inside a transaction block",
+ stmtType)));
/*
* Are we inside a function call? If the statement's parameter block
* was allocated in QueryContext, assume it is an interactive command.
* Otherwise assume it is coming from a function.
*/
if (!MemoryContextContains(QueryContext, stmtNode))
- {
- /* translator: %s represents an SQL statement name */
- elog(ERROR, "%s cannot be executed from a function", stmtType);
- }
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s cannot be executed from a function", stmtType)));
/* If we got past IsTransactionBlock test, should be in default state */
if (CurrentTransactionState->blockState != TBLOCK_DEFAULT)
- elog(ERROR, "PreventTransactionChain: can't prevent chain");
+ elog(ERROR, "cannot prevent transaction chain");
/* all okay */
}
*/
if (!MemoryContextContains(QueryContext, stmtNode))
return;
- /* translator: %s represents an SQL statement name */
- elog(ERROR, "%s may only be used in begin/end transaction blocks",
- stmtType);
+ ereport(ERROR,
+ (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+ /* translator: %s represents an SQL statement name */
+ errmsg("%s may only be used in BEGIN/END transaction blocks",
+ stmtType)));
}
* check the current transaction state
*/
if (s->blockState != TBLOCK_DEFAULT)
- elog(WARNING, "BEGIN: already a transaction in progress");
+ ereport(WARNING,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("there is already a transaction in progress")));
/*
* set the current transaction block state information appropriately
* CommitTransactionCommand() will then put us back into the default
* state.
*/
- elog(WARNING, "COMMIT: no transaction in progress");
+ ereport(WARNING,
+ (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+ errmsg("there is no transaction in progress")));
AbortTransaction();
s->blockState = TBLOCK_ENDABORT;
}
* CommitTransactionCommand() will then put us back into the default
* state.
*/
- elog(WARNING, "ROLLBACK: no transaction in progress");
+ ereport(WARNING,
+ (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+ errmsg("there is no transaction in progress")));
AbortTransaction();
s->blockState = TBLOCK_ENDABORT;
}
* CommitTransactionCommand() will then put us back into the default
* state.
*/
- elog(WARNING, "ROLLBACK: no transaction in progress");
+ ereport(WARNING,
+ (errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
+ errmsg("there is no transaction in progress")));
AbortTransaction();
s->blockState = TBLOCK_ENDABORT;
}
}
/* should never get here */
- elog(ERROR, "bogus transaction block state");
+ elog(ERROR, "invalid transaction block state: %d",
+ (int) s->blockState);
return 0; /* keep compiler quiet */
}
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.118 2003/07/17 16:45:04 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/access/transam/xlog.c,v 1.119 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (info & XLR_INFO_MASK)
{
if ((info & XLR_INFO_MASK) != XLOG_NO_TRAN)
- elog(PANIC, "XLogInsert: invalid info mask %02X",
- (info & XLR_INFO_MASK));
+ elog(PANIC, "invalid xlog info mask %02X", (info & XLR_INFO_MASK));
no_tran = true;
info &= ~XLR_INFO_MASK;
}
}
}
if (i >= XLR_MAX_BKP_BLOCKS)
- elog(PANIC, "XLogInsert: can backup %d blocks at most",
+ elog(PANIC, "can backup at most %d blocks per xlog record",
XLR_MAX_BKP_BLOCKS);
}
/* Break out of loop when rdt points to last list item */
* also remove the check for xl_len == 0 in ReadRecord, below.
*/
if (len == 0 || len > MAXLOGRECSZ)
- elog(PANIC, "XLogInsert: invalid record length %u", len);
+ elog(PANIC, "invalid xlog record length %u", len);
START_CRIT_SECTION();
* AdvanceXLInsertBuffer.
*/
if (!XLByteLT(LogwrtResult.Write, XLogCtl->xlblocks[Write->curridx]))
- elog(PANIC, "XLogWrite: write request %X/%X is past end of log %X/%X",
+ elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
XLogCtl->xlblocks[Write->curridx].xlogid,
XLogCtl->xlblocks[Write->curridx].xrecoff);
if (openLogFile >= 0)
{
if (close(openLogFile) != 0)
- elog(PANIC, "close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
XLByteToPrevSeg(LogwrtResult.Write, openLogId, openLogSeg);
(uint32) CheckPointSegments))
{
if (XLOG_DEBUG)
- elog(LOG, "XLogWrite: time for a checkpoint, signaling postmaster");
+ elog(LOG, "time for a checkpoint, signaling postmaster");
SendPostmasterSignal(PMSIGNAL_DO_CHECKPOINT);
}
}
{
openLogOff = (LogwrtResult.Write.xrecoff - BLCKSZ) % XLogSegSize;
if (lseek(openLogFile, (off_t) openLogOff, SEEK_SET) < 0)
- elog(PANIC, "lseek of log file %u, segment %u, offset %u failed: %m",
- openLogId, openLogSeg, openLogOff);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("lseek of log file %u, segment %u, offset %u failed: %m",
+ openLogId, openLogSeg, openLogOff)));
}
/* OK to write the page */
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(PANIC, "write of log file %u, segment %u, offset %u failed: %m",
- openLogId, openLogSeg, openLogOff);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("write of log file %u, segment %u, offset %u failed: %m",
+ openLogId, openLogSeg, openLogOff)));
}
openLogOff += BLCKSZ;
!XLByteInPrevSeg(LogwrtResult.Write, openLogId, openLogSeg))
{
if (close(openLogFile) != 0)
- elog(PANIC, "close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
if (openLogFile < 0)
return;
if (XLOG_DEBUG)
- {
- elog(LOG, "XLogFlush%s: request %X/%X; write %X/%X; flush %X/%X",
- (IsBootstrapProcessingMode()) ? "(bootstrap)" : "",
+ elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
record.xlogid, record.xrecoff,
LogwrtResult.Write.xlogid, LogwrtResult.Write.xrecoff,
LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
- }
START_CRIT_SECTION();
*/
if (XLByteLT(LogwrtResult.Flush, record))
elog(InRecovery ? WARNING : ERROR,
- "XLogFlush: request %X/%X is not satisfied --- flushed only to %X/%X",
+ "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
record.xlogid, record.xrecoff,
LogwrtResult.Flush.xlogid, LogwrtResult.Flush.xrecoff);
}
if (fd < 0)
{
if (errno != ENOENT)
- elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
- path, log, seg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
else
return (fd);
fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(PANIC, "creation of file %s failed: %m", tmppath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("creation of file \"%s\" failed: %m", tmppath)));
/*
* Zero-fill the file. We have to do this the hard way to ensure that
/* if write didn't set errno, assume problem is no disk space */
errno = save_errno ? save_errno : ENOSPC;
- elog(PANIC, "ZeroFill failed to write %s: %m", tmppath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("failed to write \"%s\": %m", tmppath)));
}
}
if (pg_fsync(fd) != 0)
- elog(PANIC, "fsync of file %s failed: %m", tmppath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fsync of file \"%s\" failed: %m", tmppath)));
close(fd);
fd = BasicOpenFile(path, O_RDWR | PG_BINARY | XLOG_SYNC_BIT,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
- path, log, seg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
* caller must *not* hold the lock at call.
*
* Returns TRUE if file installed, FALSE if not installed because of
- * exceeding max_advance limit. (Any other kind of failure causes elog().)
+ * exceeding max_advance limit. (Any other kind of failure causes ereport().)
*/
static bool
InstallXLogFileSegment(uint32 log, uint32 seg, char *tmppath,
*/
#if HAVE_WORKING_LINK
if (link(tmppath, path) < 0)
- elog(PANIC, "link from %s to %s (initialization of log file %u, segment %u) failed: %m",
- tmppath, path, log, seg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("link from \"%s\" to \"%s\" (initialization of log file %u, segment %u) failed: %m",
+ tmppath, path, log, seg)));
unlink(tmppath);
#else
if (rename(tmppath, path) < 0)
- elog(PANIC, "rename from %s to %s (initialization of log file %u, segment %u) failed: %m",
- tmppath, path, log, seg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("rename from \"%s\" to \"%s\" (initialization of log file %u, segment %u) failed: %m",
+ tmppath, path, log, seg)));
#endif
if (use_lock)
{
if (econt && errno == ENOENT)
{
- elog(LOG, "open of %s (log file %u, segment %u) failed: %m",
- path, log, seg);
+ ereport(LOG,
+ (errcode_for_file_access(),
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
return (fd);
}
- elog(PANIC, "open of %s (log file %u, segment %u) failed: %m",
- path, log, seg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("open of \"%s\" (log file %u, segment %u) failed: %m",
+ path, log, seg)));
}
return (fd);
xldir = opendir(XLogDir);
if (xldir == NULL)
- elog(PANIC, "could not open transaction log directory (%s): %m",
- XLogDir);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open transaction log directory \"%s\": %m",
+ XLogDir)));
sprintf(lastoff, "%08X%08X", log, seg);
snprintf(path, MAXPGPATH, "%s/%s", XLogDir, xlde->d_name);
if (XLOG_archive_dir[0])
{
- elog(LOG, "archiving transaction log file %s",
- xlde->d_name);
- elog(WARNING, "archiving log files is not implemented!");
+ ereport(LOG,
+ (errmsg("archiving transaction log file \"%s\"",
+ xlde->d_name)));
+ elog(WARNING, "archiving log files is not implemented");
}
else
{
true, XLOGfileslop,
true))
{
- elog(LOG, "recycled transaction log file %s",
- xlde->d_name);
+ ereport(LOG,
+ (errmsg("recycled transaction log file \"%s\"",
+ xlde->d_name)));
}
else
{
/* No need for any more future segments... */
- elog(LOG, "removing transaction log file %s",
- xlde->d_name);
+ ereport(LOG,
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
}
errno = 0;
}
if (errno)
- elog(PANIC, "could not read transaction log directory (%s): %m",
- XLogDir);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not read transaction log directory \"%s\": %m",
+ XLogDir)));
closedir(xldir);
}
if (!EQ_CRC64(record->xl_crc, crc))
{
- elog(emode, "ReadRecord: bad resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff);
+ ereport(emode,
+ (errmsg("bad resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
if (!EQ_CRC64(cbuf, crc))
{
- elog(emode, "ReadRecord: bad checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff);
+ ereport(emode,
+ (errmsg("bad checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
tmpRecPtr.xrecoff += SizeOfXLogPHD;
}
else if (!XRecOffIsValid(RecPtr->xrecoff))
- elog(PANIC, "ReadRecord: invalid record offset at %X/%X",
- RecPtr->xlogid, RecPtr->xrecoff);
+ ereport(PANIC,
+ (errmsg("invalid record offset at %X/%X",
+ RecPtr->xlogid, RecPtr->xrecoff)));
if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
{
readOff = targetPageOff;
if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
{
- elog(emode, "ReadRecord: lseek of log file %u, segment %u, offset %u failed: %m",
- readId, readSeg, readOff);
+ ereport(emode,
+ (errcode_for_file_access(),
+ errmsg("lseek of log file %u, segment %u, offset %u failed: %m",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
{
- elog(emode, "ReadRecord: read of log file %u, segment %u, offset %u failed: %m",
- readId, readSeg, readOff);
+ ereport(emode,
+ (errcode_for_file_access(),
+ errmsg("read of log file %u, segment %u, offset %u failed: %m",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode, nextmode))
if ((((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD) &&
RecPtr->xrecoff % BLCKSZ == SizeOfXLogPHD)
{
- elog(emode, "ReadRecord: contrecord is requested by %X/%X",
- RecPtr->xlogid, RecPtr->xrecoff);
+ ereport(emode,
+ (errmsg("contrecord is requested by %X/%X",
+ RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
record = (XLogRecord *) ((char *) readBuf + RecPtr->xrecoff % BLCKSZ);
*/
if (record->xl_len == 0)
{
- elog(emode, "ReadRecord: record with zero length at %X/%X",
- RecPtr->xlogid, RecPtr->xrecoff);
+ ereport(emode,
+ (errmsg("record with zero length at %X/%X",
+ RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
*/
if (total_len > _INTL_MAXLOGRECSZ)
{
- elog(emode, "ReadRecord: record length %u at %X/%X too long",
- total_len, RecPtr->xlogid, RecPtr->xrecoff);
+ ereport(emode,
+ (errmsg("record length %u at %X/%X too long",
+ total_len, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
if (record->xl_rmid > RM_MAX_ID)
{
- elog(emode, "ReadRecord: invalid resource manager id %u at %X/%X",
- record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff);
+ ereport(emode,
+ (errmsg("invalid resource manager id %u at %X/%X",
+ record->xl_rmid, RecPtr->xlogid, RecPtr->xrecoff)));
goto next_record_is_invalid;
}
nextRecord = NULL;
}
if (read(readFile, readBuf, BLCKSZ) != BLCKSZ)
{
- elog(emode, "ReadRecord: read of log file %u, segment %u, offset %u failed: %m",
- readId, readSeg, readOff);
+ ereport(emode,
+ (errcode_for_file_access(),
+ errmsg("read of log file %u, segment %u, offset %u failed: %m",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
if (!ValidXLOGHeader((XLogPageHeader) readBuf, emode, true))
goto next_record_is_invalid;
if (!(((XLogPageHeader) readBuf)->xlp_info & XLP_FIRST_IS_CONTRECORD))
{
- elog(emode, "ReadRecord: there is no ContRecord flag in log file %u, segment %u, offset %u",
- readId, readSeg, readOff);
+ ereport(emode,
+ (errmsg("there is no contrecord flag in log file %u, segment %u, offset %u",
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
contrecord = (XLogContRecord *) ((char *) readBuf + SizeOfXLogPHD);
if (contrecord->xl_rem_len == 0 ||
total_len != (contrecord->xl_rem_len + gotlen))
{
- elog(emode, "ReadRecord: invalid ContRecord length %u in log file %u, segment %u, offset %u",
- contrecord->xl_rem_len, readId, readSeg, readOff);
+ ereport(emode,
+ (errmsg("invalid contrecord length %u in log file %u, segment %u, offset %u",
+ contrecord->xl_rem_len,
+ readId, readSeg, readOff)));
goto next_record_is_invalid;
}
len = BLCKSZ - SizeOfXLogPHD - SizeOfXLogContRecord;
if (hdr->xlp_magic != XLOG_PAGE_MAGIC)
{
- elog(emode, "ReadRecord: invalid magic number %04X in log file %u, segment %u, offset %u",
- hdr->xlp_magic, readId, readSeg, readOff);
+ ereport(emode,
+ (errmsg("invalid magic number %04X in log file %u, segment %u, offset %u",
+ hdr->xlp_magic, readId, readSeg, readOff)));
return false;
}
if ((hdr->xlp_info & ~XLP_ALL_FLAGS) != 0)
{
- elog(emode, "ReadRecord: invalid info bits %04X in log file %u, segment %u, offset %u",
- hdr->xlp_info, readId, readSeg, readOff);
+ ereport(emode,
+ (errmsg("invalid info bits %04X in log file %u, segment %u, offset %u",
+ hdr->xlp_info, readId, readSeg, readOff)));
return false;
}
recaddr.xlogid = readId;
recaddr.xrecoff = readSeg * XLogSegSize + readOff;
if (!XLByteEQ(hdr->xlp_pageaddr, recaddr))
{
- elog(emode, "ReadRecord: unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
- hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
- readId, readSeg, readOff);
+ ereport(emode,
+ (errmsg("unexpected pageaddr %X/%X in log file %u, segment %u, offset %u",
+ hdr->xlp_pageaddr.xlogid, hdr->xlp_pageaddr.xrecoff,
+ readId, readSeg, readOff)));
return false;
}
if (hdr->xlp_sui < lastReadSUI ||
hdr->xlp_sui > lastReadSUI + 512)
{
- /* translator: SUI = startup id */
- elog(emode, "ReadRecord: out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
- hdr->xlp_sui, lastReadSUI, readId, readSeg, readOff);
+ ereport(emode,
+ /* translator: SUI = startup id */
+ (errmsg("out-of-sequence SUI %u (after %u) in log file %u, segment %u, offset %u",
+ hdr->xlp_sui, lastReadSUI,
+ readId, readSeg, readOff)));
return false;
}
}
ControlFile->localeBuflen = LOCALE_NAME_BUFLEN;
localeptr = setlocale(LC_COLLATE, NULL);
if (!localeptr)
- elog(PANIC, "invalid LC_COLLATE setting");
+ ereport(PANIC,
+ (errmsg("invalid LC_COLLATE setting")));
StrNCpy(ControlFile->lc_collate, localeptr, LOCALE_NAME_BUFLEN);
localeptr = setlocale(LC_CTYPE, NULL);
if (!localeptr)
- elog(PANIC, "invalid LC_CTYPE setting");
+ ereport(PANIC,
+ (errmsg("invalid LC_CTYPE setting")));
StrNCpy(ControlFile->lc_ctype, localeptr, LOCALE_NAME_BUFLEN);
/* Contents are protected with a CRC */
* specific error than "couldn't read pg_control".
*/
if (sizeof(ControlFileData) > BLCKSZ)
- elog(PANIC, "sizeof(ControlFileData) is larger than BLCKSZ; fix either one");
+ ereport(PANIC,
+ (errmsg("sizeof(ControlFileData) is larger than BLCKSZ; fix either one")));
memset(buffer, 0, BLCKSZ);
memcpy(buffer, ControlFile, sizeof(ControlFileData));
fd = BasicOpenFile(ControlFilePath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY,
S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(PANIC, "WriteControlFile: could not create control file (%s): %m",
- ControlFilePath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not create control file \"%s\": %m",
+ ControlFilePath)));
errno = 0;
if (write(fd, buffer, BLCKSZ) != BLCKSZ)
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(PANIC, "WriteControlFile: write to control file failed: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("write to control file failed: %m")));
}
if (pg_fsync(fd) != 0)
- elog(PANIC, "WriteControlFile: fsync of control file failed: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fsync of control file failed: %m")));
close(fd);
}
*/
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open control file \"%s\": %m",
+ ControlFilePath)));
if (read(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
- elog(PANIC, "read from control file failed: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("read from control file failed: %m")));
close(fd);
* more enlightening than complaining about wrong CRC.
*/
if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
- elog(PANIC,
- "The database cluster was initialized with PG_CONTROL_VERSION %d,\n"
- "\tbut the server was compiled with PG_CONTROL_VERSION %d.\n"
- "\tIt looks like you need to initdb.",
- ControlFile->pg_control_version, PG_CONTROL_VERSION);
-
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
+ " but the server was compiled with PG_CONTROL_VERSION %d.",
+ ControlFile->pg_control_version, PG_CONTROL_VERSION),
+ errhint("It looks like you need to initdb.")));
/* Now check the CRC. */
INIT_CRC64(crc);
COMP_CRC64(crc,
FIN_CRC64(crc);
if (!EQ_CRC64(crc, ControlFile->crc))
- elog(PANIC, "invalid checksum in control file");
+ ereport(FATAL,
+ (errmsg("invalid checksum in control file")));
/*
* Do compatibility checking immediately. We do this here for 2
* compatibility items because they can affect sort order of indexes.)
*/
if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
- elog(PANIC,
- "The database cluster was initialized with CATALOG_VERSION_NO %d,\n"
- "\tbut the backend was compiled with CATALOG_VERSION_NO %d.\n"
- "\tIt looks like you need to initdb.",
- ControlFile->catalog_version_no, CATALOG_VERSION_NO);
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
+ " but the server was compiled with CATALOG_VERSION_NO %d.",
+ ControlFile->catalog_version_no, CATALOG_VERSION_NO),
+ errhint("It looks like you need to initdb.")));
if (ControlFile->blcksz != BLCKSZ)
- elog(PANIC,
- "The database cluster was initialized with BLCKSZ %d,\n"
- "\tbut the backend was compiled with BLCKSZ %d.\n"
- "\tIt looks like you need to initdb.",
- ControlFile->blcksz, BLCKSZ);
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with BLCKSZ %d,"
+ " but the server was compiled with BLCKSZ %d.",
+ ControlFile->blcksz, BLCKSZ),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->relseg_size != RELSEG_SIZE)
- elog(PANIC,
- "The database cluster was initialized with RELSEG_SIZE %d,\n"
- "\tbut the backend was compiled with RELSEG_SIZE %d.\n"
- "\tIt looks like you need to recompile or initdb.",
- ControlFile->relseg_size, RELSEG_SIZE);
-
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
+ " but the server was compiled with RELSEG_SIZE %d.",
+ ControlFile->relseg_size, RELSEG_SIZE),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
- elog(PANIC,
- "The database cluster was initialized with NAMEDATALEN %d,\n"
- "\tbut the backend was compiled with NAMEDATALEN %d.\n"
- "\tIt looks like you need to recompile or initdb.",
- ControlFile->nameDataLen, NAMEDATALEN);
-
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with NAMEDATALEN %d,"
+ " but the server was compiled with NAMEDATALEN %d.",
+ ControlFile->nameDataLen, NAMEDATALEN),
+ errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->funcMaxArgs != FUNC_MAX_ARGS)
- elog(PANIC,
- "The database cluster was initialized with FUNC_MAX_ARGS %d,\n"
- "\tbut the backend was compiled with FUNC_MAX_ARGS %d.\n"
- "\tIt looks like you need to recompile or initdb.",
- ControlFile->funcMaxArgs, FUNC_MAX_ARGS);
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with FUNC_MAX_ARGS %d,"
+ " but the server was compiled with FUNC_MAX_ARGS %d.",
+ ControlFile->funcMaxArgs, FUNC_MAX_ARGS),
+ errhint("It looks like you need to recompile or initdb.")));
#ifdef HAVE_INT64_TIMESTAMP
if (ControlFile->enableIntTimes != TRUE)
- elog(PANIC,
- "The database cluster was initialized without HAVE_INT64_TIMESTAMP\n"
- "\tbut the backend was compiled with HAVE_INT64_TIMESTAMP.\n"
- "\tIt looks like you need to recompile or initdb.");
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized without HAVE_INT64_TIMESTAMP"
+ " but the server was compiled with HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#else
if (ControlFile->enableIntTimes != FALSE)
- elog(PANIC,
- "The database cluster was initialized with HAVE_INT64_TIMESTAMP\n"
- "\tbut the backend was compiled without HAVE_INT64_TIMESTAMP.\n"
- "\tIt looks like you need to recompile or initdb.");
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with HAVE_INT64_TIMESTAMP"
+ " but the server was compiled without HAVE_INT64_TIMESTAMP."),
+ errhint("It looks like you need to recompile or initdb.")));
#endif
if (ControlFile->localeBuflen != LOCALE_NAME_BUFLEN)
- elog(PANIC,
- "The database cluster was initialized with LOCALE_NAME_BUFLEN %d,\n"
- "\tbut the backend was compiled with LOCALE_NAME_BUFLEN %d.\n"
- "\tIt looks like you need to initdb.",
- ControlFile->localeBuflen, LOCALE_NAME_BUFLEN);
-
+ ereport(FATAL,
+ (errmsg("database files are incompatible with server"),
+ errdetail("The database cluster was initialized with LOCALE_NAME_BUFLEN %d,"
+ " but the server was compiled with LOCALE_NAME_BUFLEN %d.",
+ ControlFile->localeBuflen, LOCALE_NAME_BUFLEN),
+ errhint("It looks like you need to recompile or initdb.")));
if (setlocale(LC_COLLATE, ControlFile->lc_collate) == NULL)
- elog(PANIC,
- "The database cluster was initialized with LC_COLLATE '%s',\n"
- "\twhich is not recognized by setlocale().\n"
- "\tIt looks like you need to initdb.",
- ControlFile->lc_collate);
+ ereport(FATAL,
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_COLLATE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_collate),
+ errhint("It looks like you need to initdb or install locale support.")));
if (setlocale(LC_CTYPE, ControlFile->lc_ctype) == NULL)
- elog(PANIC,
- "The database cluster was initialized with LC_CTYPE '%s',\n"
- "\twhich is not recognized by setlocale().\n"
- "\tIt looks like you need to initdb.",
- ControlFile->lc_ctype);
+ ereport(FATAL,
+ (errmsg("database files are incompatible with operating system"),
+ errdetail("The database cluster was initialized with LC_CTYPE \"%s\","
+ " which is not recognized by setlocale().",
+ ControlFile->lc_ctype),
+ errhint("It looks like you need to initdb or install locale support.")));
/* Make the fixed locale settings visible as GUC variables, too */
SetConfigOption("lc_collate", ControlFile->lc_collate,
fd = BasicOpenFile(ControlFilePath, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
if (fd < 0)
- elog(PANIC, "could not open control file (%s): %m", ControlFilePath);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("could not open control file \"%s\": %m",
+ ControlFilePath)));
errno = 0;
if (write(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(PANIC, "write to control file failed: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("write to control file failed: %m")));
}
if (pg_fsync(fd) != 0)
- elog(PANIC, "fsync of control file failed: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fsync of control file failed: %m")));
close(fd);
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(PANIC, "BootStrapXLOG failed to write log file: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("failed to write bootstrap xlog file: %m")));
}
if (pg_fsync(openLogFile) != 0)
- elog(PANIC, "BootStrapXLOG failed to fsync log file: %m");
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("failed to fsync bootstrap xlog file: %m")));
close(openLogFile);
openLogFile = -1;
ControlFile->state < DB_SHUTDOWNED ||
ControlFile->state > DB_IN_PRODUCTION ||
!XRecOffIsValid(ControlFile->checkPoint.xrecoff))
- elog(PANIC, "control file context is broken");
+ ereport(FATAL,
+ (errmsg("control file contains invalid data")));
if (ControlFile->state == DB_SHUTDOWNED)
- elog(LOG, "database system was shut down at %s",
- str_time(ControlFile->time));
+ ereport(LOG,
+ (errmsg("database system was shut down at %s",
+ str_time(ControlFile->time))));
else if (ControlFile->state == DB_SHUTDOWNING)
- elog(LOG, "database system shutdown was interrupted at %s",
- str_time(ControlFile->time));
+ ereport(LOG,
+ (errmsg("database system shutdown was interrupted at %s",
+ str_time(ControlFile->time))));
else if (ControlFile->state == DB_IN_RECOVERY)
- elog(LOG, "database system was interrupted being in recovery at %s\n"
- "\tThis probably means that some data blocks are corrupted\n"
- "\tand you will have to use the last backup for recovery.",
- str_time(ControlFile->time));
+ ereport(LOG,
+ (errmsg("database system was interrupted while in recovery at %s",
+ str_time(ControlFile->time)),
+ errhint("This probably means that some data is corrupted and"
+ " you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
- elog(LOG, "database system was interrupted at %s",
- str_time(ControlFile->time));
+ ereport(LOG,
+ (errmsg("database system was interrupted at %s",
+ str_time(ControlFile->time))));
/* This is just to allow attaching to startup process with a debugger */
#ifdef XLOG_REPLAY_DELAY
if (record != NULL)
{
checkPointLoc = ControlFile->checkPoint;
- elog(LOG, "checkpoint record is at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff);
+ ereport(LOG,
+ (errmsg("checkpoint record is at %X/%X",
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
}
else
{
if (record != NULL)
{
checkPointLoc = ControlFile->prevCheckPoint;
- elog(LOG, "using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff);
+ ereport(LOG,
+ (errmsg("using previous checkpoint record at %X/%X",
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
- elog(PANIC, "unable to locate a valid checkpoint record");
+ ereport(PANIC,
+ (errmsg("unable to locate a valid checkpoint record")));
}
LastRec = RecPtr = checkPointLoc;
memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
wasShutdown = (record->xl_info == XLOG_CHECKPOINT_SHUTDOWN);
- elog(LOG, "redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
- checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
- checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
- wasShutdown ? "TRUE" : "FALSE");
- elog(LOG, "next transaction id: %u; next oid: %u",
- checkPoint.nextXid, checkPoint.nextOid);
+ ereport(LOG,
+ (errmsg("redo record is at %X/%X; undo record is at %X/%X; shutdown %s",
+ checkPoint.redo.xlogid, checkPoint.redo.xrecoff,
+ checkPoint.undo.xlogid, checkPoint.undo.xrecoff,
+ wasShutdown ? "TRUE" : "FALSE")));
+ ereport(LOG,
+ (errmsg("next transaction id: %u; next oid: %u",
+ checkPoint.nextXid, checkPoint.nextOid)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
- elog(PANIC, "invalid next transaction id");
+ ereport(PANIC,
+ (errmsg("invalid next transaction id")));
ShmemVariableCache->nextXid = checkPoint.nextXid;
ShmemVariableCache->nextOid = checkPoint.nextOid;
XLogCtl->SavedRedoRecPtr = checkPoint.redo;
if (XLByteLT(RecPtr, checkPoint.redo))
- elog(PANIC, "invalid redo in checkpoint record");
+ ereport(PANIC,
+ (errmsg("invalid redo in checkpoint record")));
if (checkPoint.undo.xrecoff == 0)
checkPoint.undo = RecPtr;
XLByteLT(checkPoint.redo, RecPtr))
{
if (wasShutdown)
- elog(PANIC, "invalid redo/undo record in shutdown checkpoint");
+ ereport(PANIC,
+ (errmsg("invalid redo/undo record in shutdown checkpoint")));
InRecovery = true;
}
else if (ControlFile->state != DB_SHUTDOWNED)
{
int rmid;
- elog(LOG, "database system was not properly shut down; "
- "automatic recovery in progress");
+ ereport(LOG,
+ (errmsg("database system was not properly shut down; "
+ "automatic recovery in progress")));
ControlFile->state = DB_IN_RECOVERY;
ControlFile->time = time(NULL);
UpdateControlFile();
if (record != NULL)
{
InRedo = true;
- elog(LOG, "redo starts at %X/%X",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ ereport(LOG,
+ (errmsg("redo starts at %X/%X",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
do
{
/* nextXid must be beyond record's xid */
RmgrTable[record->xl_rmid].rm_redo(EndRecPtr, record);
record = ReadRecord(NULL, LOG, buffer);
} while (record != NULL);
- elog(LOG, "redo done at %X/%X",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ ereport(LOG,
+ (errmsg("redo done at %X/%X",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
LastRec = ReadRecPtr;
InRedo = false;
}
else
- elog(LOG, "redo is not required");
+ ereport(LOG,
+ (errmsg("redo is not required")));
}
/*
RecPtr = ReadRecPtr;
if (XLByteLT(checkPoint.undo, RecPtr))
{
- elog(LOG, "undo starts at %X/%X",
- RecPtr.xlogid, RecPtr.xrecoff);
+ ereport(LOG,
+ (errmsg("undo starts at %X/%X",
+ RecPtr.xlogid, RecPtr.xrecoff)));
do
{
record = ReadRecord(&RecPtr, PANIC, buffer);
RmgrTable[record->xl_rmid].rm_undo(EndRecPtr, record);
RecPtr = record->xl_prev;
} while (XLByteLE(checkPoint.undo, RecPtr));
- elog(LOG, "undo done at %X/%X",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff);
+ ereport(LOG,
+ (errmsg("undo done at %X/%X",
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff)));
}
else
- elog(LOG, "undo is not required");
+ ereport(LOG,
+ (errmsg("undo is not required")));
}
#endif
/* Start up the commit log, too */
StartupCLOG();
- elog(LOG, "database system is ready");
+ ereport(LOG,
+ (errmsg("database system is ready")));
CritSectionCount--;
/* Shut down readFile facility, free space */
if (!XRecOffIsValid(RecPtr.xrecoff))
{
- elog(LOG, (whichChkpt == 1 ?
- "invalid primary checkpoint link in control file" :
- "invalid secondary checkpoint link in control file"));
+ ereport(LOG,
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid %s checkpoint link in control file",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record == NULL)
{
- elog(LOG, (whichChkpt == 1 ?
- "invalid primary checkpoint record" :
- "invalid secondary checkpoint record"));
+ ereport(LOG,
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_rmid != RM_XLOG_ID)
{
- elog(LOG, (whichChkpt == 1 ?
- "invalid resource manager id in primary checkpoint record" :
- "invalid resource manager id in secondary checkpoint record"));
+ ereport(LOG,
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid resource manager id in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_info != XLOG_CHECKPOINT_SHUTDOWN &&
record->xl_info != XLOG_CHECKPOINT_ONLINE)
{
- elog(LOG, (whichChkpt == 1 ?
- "invalid xl_info in primary checkpoint record" :
- "invalid xl_info in secondary checkpoint record"));
+ ereport(LOG,
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid xl_info in %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
if (record->xl_len != sizeof(CheckPoint))
{
- elog(LOG, (whichChkpt == 1 ?
- "invalid length of primary checkpoint record" :
- "invalid length of secondary checkpoint record"));
+ ereport(LOG,
+ /* translator: %s is "primary" or "secondary" */
+ (errmsg("invalid length of %s checkpoint record",
+ (whichChkpt == 1) ? gettext("primary") : gettext("secondary"))));
return NULL;
}
return record;
void
ShutdownXLOG(void)
{
- elog(LOG, "shutting down");
+ ereport(LOG,
+ (errmsg("shutting down")));
/* suppress in-transaction check in CreateCheckPoint */
MyLastRecPtr.xrecoff = 0;
ShutdownCLOG();
CritSectionCount--;
- elog(LOG, "database system is shut down");
+ ereport(LOG,
+ (errmsg("database system is shut down")));
}
/*
uint32 _logSeg;
if (MyXactMadeXLogEntry)
- elog(ERROR, "CreateCheckPoint: cannot be called inside transaction block");
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("checkpoint cannot be made inside transaction block")));
/*
* Acquire CheckpointLock to ensure only one checkpoint happens at a time.
* recptr = end of actual checkpoint record.
*/
if (shutdown && !XLByteEQ(checkPoint.redo, ProcLastRecPtr))
- elog(PANIC, "concurrent transaction log activity while database system is shutting down");
+ ereport(PANIC,
+ (errmsg("concurrent transaction log activity while database system is shutting down")));
/*
* Select point at which we can truncate the log, which we base on the
if (openLogFile >= 0)
{
if (pg_fsync(openLogFile) != 0)
- elog(PANIC, "fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
if (open_sync_bit != new_sync_bit)
{
if (close(openLogFile) != 0)
- elog(PANIC, "close of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("close of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
}
{
case SYNC_METHOD_FSYNC:
if (pg_fsync(openLogFile) != 0)
- elog(PANIC, "fsync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fsync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#ifdef HAVE_FDATASYNC
case SYNC_METHOD_FDATASYNC:
if (pg_fdatasync(openLogFile) != 0)
- elog(PANIC, "fdatasync of log file %u, segment %u failed: %m",
- openLogId, openLogSeg);
+ ereport(PANIC,
+ (errcode_for_file_access(),
+ errmsg("fdatasync of log file %u, segment %u failed: %m",
+ openLogId, openLogSeg)));
break;
#endif
case SYNC_METHOD_OPEN:
/* write synced it already */
break;
default:
- elog(PANIC, "bogus wal_sync_method %d", sync_method);
+ elog(PANIC, "unrecognized wal_sync_method: %d", sync_method);
break;
}
}
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: elog.h,v 1.52 2003/07/21 17:05:11 tgl Exp $
+ * $Id: elog.h,v 1.53 2003/07/21 20:29:39 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#define ERRCODE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION MAKE_SQLSTATE('2','5', '0','0','5')
#define ERRCODE_READ_ONLY_SQL_TRANSACTION MAKE_SQLSTATE('2','5', '0','0','6')
#define ERRCODE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED MAKE_SQLSTATE('2','5', '0','0','7')
+#define ERRCODE_NO_ACTIVE_SQL_TRANSACTION MAKE_SQLSTATE('2','5', 'P','0','1')
/* Class 26 - Invalid SQL Statement Name */
/* (we take this to mean prepared statements) */
#define ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE MAKE_SQLSTATE('5','5', '0','0','0')
#define ERRCODE_OBJECT_IN_USE MAKE_SQLSTATE('5','5', '0','0','6')
#define ERRCODE_INDEXES_DEACTIVATED MAKE_SQLSTATE('5','5', 'P','0','1')
+#define ERRCODE_INDEX_CORRUPTED MAKE_SQLSTATE('5','5', 'P','0','2')
/* Class 57 - Operator Intervention (class borrowed from DB2) */
#define ERRCODE_OPERATOR_INTERVENTION MAKE_SQLSTATE('5','7', '0','0','0')
insert into atacc1 (test) values (2);
-- should fail
insert into atacc1 (test) values (2);
-ERROR: Cannot insert a duplicate key into unique index atacc_test1
+ERROR: duplicate key violates UNIQUE constraint "atacc_test1"
-- should succeed
insert into atacc1 (test) values (4);
-- try adding a unique oid constraint
insert into atacc1 (test,test2) values (4,4);
-- should fail
insert into atacc1 (test,test2) values (4,4);
-ERROR: Cannot insert a duplicate key into unique index atacc_test1
+ERROR: duplicate key violates UNIQUE constraint "atacc_test1"
-- should all succeed
insert into atacc1 (test,test2) values (4,5);
insert into atacc1 (test,test2) values (5,4);
-- should fail for second one
insert into atacc1 (test2, test) values (3, 3);
insert into atacc1 (test2, test) values (2, 3);
-ERROR: Cannot insert a duplicate key into unique index atacc1_test_key
+ERROR: duplicate key violates UNIQUE constraint "atacc1_test_key"
drop table atacc1;
-- test primary key constraint adding
create table atacc1 ( test int );
insert into atacc1 (test) values (2);
-- should fail
insert into atacc1 (test) values (2);
-ERROR: Cannot insert a duplicate key into unique index atacc_test1
+ERROR: duplicate key violates UNIQUE constraint "atacc_test1"
-- should succeed
insert into atacc1 (test) values (4);
-- inserting NULL should fail
insert into atacc1 (test,test2) values (4,4);
-- should fail
insert into atacc1 (test,test2) values (4,4);
-ERROR: Cannot insert a duplicate key into unique index atacc_test1
+ERROR: duplicate key violates UNIQUE constraint "atacc_test1"
insert into atacc1 (test,test2) values (NULL,3);
ERROR: null value for attribute "test" violates NOT NULL constraint
insert into atacc1 (test,test2) values (3, NULL);
-- only first should succeed
insert into atacc1 (test2, test) values (3, 3);
insert into atacc1 (test2, test) values (2, 3);
-ERROR: Cannot insert a duplicate key into unique index atacc1_pkey
+ERROR: duplicate key violates UNIQUE constraint "atacc1_pkey"
insert into atacc1 (test2, test) values (1, NULL);
ERROR: null value for attribute "test" violates NOT NULL constraint
drop table atacc1;
insert into arr_tbl values ('{1,2}');
-- failure expected:
insert into arr_tbl values ('{1,2,3}');
-ERROR: Cannot insert a duplicate key into unique index arr_tbl_f1_key
+ERROR: duplicate key violates UNIQUE constraint "arr_tbl_f1_key"
insert into arr_tbl values ('{2,3,4}');
insert into arr_tbl values ('{1,5,3}');
insert into arr_tbl values ('{1,2,10}');
INSERT INTO func_index_heap VALUES('QWE','RTY');
-- this should fail because of unique index:
INSERT INTO func_index_heap VALUES('ABCD', 'EF');
-ERROR: Cannot insert a duplicate key into unique index func_index_index
+ERROR: duplicate key violates UNIQUE constraint "func_index_index"
-- but this shouldn't:
INSERT INTO func_index_heap VALUES('QWERTY');
--
INSERT INTO func_index_heap VALUES('QWE','RTY');
-- this should fail because of unique index:
INSERT INTO func_index_heap VALUES('ABCD', 'EF');
-ERROR: Cannot insert a duplicate key into unique index func_index_index
+ERROR: duplicate key violates UNIQUE constraint "func_index_index"
-- but this shouldn't:
INSERT INTO func_index_heap VALUES('QWERTY');
--
-- not in a xact
abort;
-WARNING: ROLLBACK: no transaction in progress
+WARNING: there is no transaction in progress
-- not in a xact
end;
-WARNING: COMMIT: no transaction in progress
+WARNING: there is no transaction in progress
--
-- CREATE AGGREGATE
-- sfunc/finalfunc type disagreement
-- Finally we want errors
--
insert into PField values ('PF1_1', 'should fail due to unique index');
-ERROR: Cannot insert a duplicate key into unique index pfield_name
+ERROR: duplicate key violates UNIQUE constraint "pfield_name"
update PSlot set backlink = 'WS.not.there' where slotname = 'PS.base.a1';
ERROR: WS.not.there does not exist
CONTEXT: PL/pgSQL function tg_backlink_a line 16 at assignment
ERROR: illegal slotlink beginning with XX
CONTEXT: PL/pgSQL function tg_slotlink_a line 16 at assignment
insert into HSlot values ('HS', 'base.hub1', 1, '');
-ERROR: Cannot insert a duplicate key into unique index hslot_name
+ERROR: duplicate key violates UNIQUE constraint "hslot_name"
insert into HSlot values ('HS', 'base.hub1', 20, '');
ERROR: no manual manipulation of HSlot
delete from HSlot;
INSERT INTO PRIMARY_TBL VALUES (1, 'one');
INSERT INTO PRIMARY_TBL VALUES (2, 'two');
INSERT INTO PRIMARY_TBL VALUES (1, 'three');
-ERROR: Cannot insert a duplicate key into unique index primary_tbl_pkey
+ERROR: duplicate key violates UNIQUE constraint "primary_tbl_pkey"
INSERT INTO PRIMARY_TBL VALUES (4, 'three');
INSERT INTO PRIMARY_TBL VALUES (5, 'one');
INSERT INTO PRIMARY_TBL (t) VALUES ('six');
INSERT INTO UNIQUE_TBL VALUES (1, 'one');
INSERT INTO UNIQUE_TBL VALUES (2, 'two');
INSERT INTO UNIQUE_TBL VALUES (1, 'three');
-ERROR: Cannot insert a duplicate key into unique index unique_tbl_i_key
+ERROR: duplicate key violates UNIQUE constraint "unique_tbl_i_key"
INSERT INTO UNIQUE_TBL VALUES (4, 'four');
INSERT INTO UNIQUE_TBL VALUES (5, 'one');
INSERT INTO UNIQUE_TBL (t) VALUES ('six');
INSERT INTO UNIQUE_TBL VALUES (2, 'two');
INSERT INTO UNIQUE_TBL VALUES (1, 'three');
INSERT INTO UNIQUE_TBL VALUES (1, 'one');
-ERROR: Cannot insert a duplicate key into unique index unique_tbl_i_key
+ERROR: duplicate key violates UNIQUE constraint "unique_tbl_i_key"
INSERT INTO UNIQUE_TBL VALUES (5, 'one');
INSERT INTO UNIQUE_TBL (t) VALUES ('six');
SELECT '' AS five, * FROM UNIQUE_TBL;