*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.27 2002/06/20 20:29:34 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/buf_table.c,v 1.28 2003/07/24 22:04:08 tgl Exp $
*
*-------------------------------------------------------------------------
*/
HASH_ELEM | HASH_FUNCTION);
if (!SharedBufHash)
- elog(FATAL, "couldn't initialize shared buffer pool Hash Tbl");
+ elog(FATAL, "could not initialize shared buffer hash table");
}
BufferDesc *
result = (BufferLookupEnt *)
hash_search(SharedBufHash, (void *) &(buf->tag), HASH_REMOVE, NULL);
- if (!result)
- {
- /* shouldn't happen */
- elog(ERROR, "BufTableDelete: BufferLookup table corrupted");
- return FALSE;
- }
+ if (!result) /* shouldn't happen */
+ elog(ERROR, "shared buffer hash table corrupted");
/*
* Clear the buffer's tag. This doesn't matter for the hash table,
hash_search(SharedBufHash, (void *) &(buf->tag), HASH_ENTER, &found);
if (!result)
- {
- elog(ERROR, "BufTableInsert: BufferLookup table out of memory");
- return FALSE;
- }
-
- /* found something else in the table ! */
- if (found)
- {
- elog(ERROR, "BufTableInsert: BufferLookup table corrupted");
- return FALSE;
- }
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of shared memory")));
+
+ if (found) /* found something else in the table? */
+ elog(ERROR, "shared buffer hash table corrupted");
result->id = buf->buf_id;
return TRUE;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.136 2003/05/10 19:04:30 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/bufmgr.c,v 1.137 2003/07/24 22:04:08 tgl Exp $
*
*-------------------------------------------------------------------------
*/
{
if (zero_damaged_pages)
{
- elog(WARNING, "Invalid page header in block %u of %s; zeroing out page",
- blockNum, RelationGetRelationName(reln));
+ ereport(WARNING,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("invalid page header in block %u of \"%s\"; zeroing out page",
+ blockNum, RelationGetRelationName(reln))));
MemSet((char *) MAKE_PTR(bufHdr->data), 0, BLCKSZ);
}
else
- elog(ERROR, "Invalid page header in block %u of %s",
- blockNum, RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("invalid page header in block %u of \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
if (!BufTableDelete(bufHdr))
{
LWLockRelease(BufMgrLock);
- elog(FATAL, "BufRead: buffer table broken after IO error");
+ elog(FATAL, "buffer table broken after I/O error");
}
/* remember that BufferAlloc() pinned the buffer */
UnpinBuffer(bufHdr);
if (smok == FALSE)
{
- elog(WARNING, "BufferAlloc: cannot write block %u for %u/%u",
- buf->tag.blockNum,
- buf->tag.rnode.tblNode, buf->tag.rnode.relNode);
+ ereport(WARNING,
+ (errcode(ERRCODE_IO_ERROR),
+ errmsg("could not write block %u of %u/%u",
+ buf->tag.blockNum,
+ buf->tag.rnode.tblNode,
+ buf->tag.rnode.relNode)));
inProgress = FALSE;
buf->flags |= BM_IO_ERROR;
buf->flags &= ~BM_IO_IN_PROGRESS;
*/
if (buf->flags & BM_JUST_DIRTIED)
{
- elog(PANIC, "BufferAlloc: content of block %u (%u/%u) changed while flushing",
+ elog(PANIC, "content of block %u of %u/%u changed while flushing",
buf->tag.blockNum,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode);
}
if (!BufTableDelete(buf))
{
LWLockRelease(BufMgrLock);
- elog(FATAL, "buffer wasn't in the buffer table");
+ elog(FATAL, "buffer wasn't in the buffer hash table");
}
INIT_BUFFERTAG(&(buf->tag), reln, blockNum);
if (!BufTableInsert(buf))
{
LWLockRelease(BufMgrLock);
- elog(FATAL, "Buffer in lookup table twice");
+ elog(FATAL, "buffer in buffer hash table twice");
}
/*
}
if (BAD_BUFFER_ID(buffer))
- elog(ERROR, "write_buffer: bad buffer %d", buffer);
+ elog(ERROR, "bad buffer id: %d", buffer);
bufHdr = &BufferDescriptors[buffer - 1];
}
if (status == SM_FAIL) /* disk failure ?! */
- elog(PANIC, "BufferSync: cannot write %u for %u/%u",
- bufHdr->tag.blockNum,
- bufHdr->tag.rnode.tblNode, bufHdr->tag.rnode.relNode);
+ ereport(PANIC,
+ (errcode(ERRCODE_IO_ERROR),
+ errmsg("could not write block %u of %u/%u",
+ bufHdr->tag.blockNum,
+ bufHdr->tag.rnode.tblNode,
+ bufHdr->tag.rnode.relNode)));
/*
* Note that it's safe to change cntxDirty here because we
* AtEOXact_Buffers - clean up at end of transaction.
*
* During abort, we need to release any buffer pins we're holding
- * (this cleans up in case elog interrupted a routine that pins a
+ * (this cleans up in case ereport interrupted a routine that pins a
* buffer). During commit, we shouldn't need to do that, but check
* anyway to see if anyone leaked a buffer reference count.
*/
if (isCommit)
elog(WARNING,
- "Buffer Leak: [%03d] (freeNext=%d, freePrev=%d, "
- "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
+ "buffer refcount leak: [%03d] (freeNext=%d, freePrev=%d, "
+ "rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
i, buf->freeNext, buf->freePrev,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
buf->tag.blockNum, buf->flags,
{
/* the sole pin should be ours */
if (bufHdr->refcount != 1 || PrivateRefCount[i - 1] == 0)
- elog(FATAL, "DropRelFileNodeBuffers: block %u is referenced (private %ld, global %d)",
+ elog(FATAL, "block %u of %u/%u is still referenced (private %ld, global %d)",
bufHdr->tag.blockNum,
+ bufHdr->tag.rnode.tblNode,
+ bufHdr->tag.rnode.relNode,
PrivateRefCount[i - 1], bufHdr->refcount);
/* Make sure it will be released */
PrivateRefCount[i - 1] = 1;
if (status == SM_FAIL)
{
error_context_stack = errcontext.previous;
- elog(WARNING, "FlushRelationBuffers(%s (local), %u): block %u is dirty, could not flush it",
+ elog(WARNING, "FlushRelationBuffers(\"%s\" (local), %u): block %u is dirty, could not flush it",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum);
return (-1);
if (LocalRefCount[i] > 0)
{
error_context_stack = errcontext.previous;
- elog(WARNING, "FlushRelationBuffers(%s (local), %u): block %u is referenced (%ld)",
+ elog(WARNING, "FlushRelationBuffers(\"%s\" (local), %u): block %u is referenced (%ld)",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum, LocalRefCount[i]);
return (-2);
(char *) MAKE_PTR(bufHdr->data));
if (status == SM_FAIL) /* disk failure ?! */
- elog(PANIC, "FlushRelationBuffers: cannot write %u for %u/%u",
- bufHdr->tag.blockNum,
- bufHdr->tag.rnode.tblNode,
- bufHdr->tag.rnode.relNode);
+ ereport(PANIC,
+ (errcode(ERRCODE_IO_ERROR),
+ errmsg("could not write block %u of %u/%u",
+ bufHdr->tag.blockNum,
+ bufHdr->tag.rnode.tblNode,
+ bufHdr->tag.rnode.relNode)));
BufferFlushCount++;
{
LWLockRelease(BufMgrLock);
error_context_stack = errcontext.previous;
- elog(WARNING, "FlushRelationBuffers(%s, %u): block %u is referenced (private %ld, global %d)",
+ elog(WARNING, "FlushRelationBuffers(\"%s\", %u): block %u is referenced (private %ld, global %d)",
RelationGetRelationName(rel), firstDelBlock,
bufHdr->tag.blockNum,
PrivateRefCount[i], bufHdr->refcount);
}
if (BAD_BUFFER_ID(buffer))
- elog(ERROR, "SetBufferCommitInfoNeedsSave: bad buffer %d", buffer);
+ elog(ERROR, "bad buffer id: %d", buffer);
bufHdr = &BufferDescriptors[buffer - 1];
buf->cntxDirty = true;
}
else
- elog(ERROR, "LockBuffer: unknown lock mode %d", mode);
+ elog(ERROR, "unrecognized buffer lock mode: %d", mode);
}
/*
{
/* There should be exactly one pin */
if (LocalRefCount[-buffer - 1] != 1)
- elog(ERROR, "LockBufferForCleanup: wrong local pin count");
+ elog(ERROR, "incorrect local pin count: %ld",
+ LocalRefCount[-buffer - 1]);
/* Nobody else to wait for */
return;
}
/* There should be exactly one local pin */
if (PrivateRefCount[buffer - 1] != 1)
- elog(ERROR, "LockBufferForCleanup: wrong local pin count");
+ elog(ERROR, "incorrect local pin count: %ld",
+ PrivateRefCount[buffer - 1]);
bufHdr = &BufferDescriptors[buffer - 1];
buflock = &(BufferLocks[buffer - 1]);
{
LWLockRelease(BufMgrLock);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
- elog(ERROR, "Multiple backends attempting to wait for pincount 1");
+ elog(ERROR, "multiple backends attempting to wait for pincount 1");
}
bufHdr->wait_backend_id = MyBackendId;
bufHdr->flags |= BM_PIN_COUNT_WAITER;
/* Issue notice if this is not the first failure... */
if (buf->flags & BM_IO_ERROR)
{
- elog(WARNING, "write error may be permanent: cannot write block %u for %u/%u",
- buf->tag.blockNum,
- buf->tag.rnode.tblNode, buf->tag.rnode.relNode);
+ ereport(WARNING,
+ (errcode(ERRCODE_IO_ERROR),
+ errmsg("could not write block %u of %u/%u",
+ buf->tag.blockNum,
+ buf->tag.rnode.tblNode,
+ buf->tag.rnode.relNode),
+ errdetail("Multiple failures --- write error may be permanent.")));
}
buf->flags |= BM_DIRTY;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/freelist.c,v 1.29 2002/06/20 20:29:34 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/freelist.c,v 1.30 2003/07/24 22:04:08 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (Free_List_Descriptor == SharedFreeList->freeNext)
{
/* queue is empty. All buffers in the buffer pool are pinned. */
- elog(ERROR, "out of free buffers: time to abort!");
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("out of free buffers")));
return NULL;
}
buf = &(BufferDescriptors[SharedFreeList->freeNext]);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.47 2002/12/05 22:48:03 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/buffer/localbuf.c,v 1.48 2003/07/24 22:04:08 tgl Exp $
*
*-------------------------------------------------------------------------
*/
}
}
if (bufHdr == NULL)
- elog(ERROR, "no empty local buffer.");
+ ereport(ERROR,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("no empty local buffer available")));
/*
* this buffer is not referenced but it might still be dirty. if
char *data = (char *) malloc(BLCKSZ);
if (data == NULL)
- elog(ERROR, "Out of memory in LocalBufferAlloc");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
/*
* This is a bit of a hack: bufHdr->data needs to be a shmem
if (isCommit)
elog(WARNING,
- "Local Buffer Leak: [%03d] (rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
+ "local buffer leak: [%03d] (rel=%u/%u, blockNum=%u, flags=0x%x, refcount=%d %ld)",
i,
buf->tag.rnode.tblNode, buf->tag.rnode.relNode,
buf->tag.blockNum, buf->flags,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.16 2003/04/29 03:21:29 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/buffile.c,v 1.17 2003/07/24 22:04:09 tgl Exp $
*
* NOTES:
*
* will go away automatically at transaction end. If the underlying
* virtual File is made with OpenTemporaryFile, then all resources for
* the file are certain to be cleaned up even if processing is aborted
- * by elog(ERROR). To avoid confusion, the caller should take care that
+ * by ereport(ERROR). To avoid confusion, the caller should take care that
* all calls for a single BufFile are made in the same palloc context.
*
* BufFile also supports temporary files that exceed the OS file size limit
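As an aside to the contract described in these notes, a minimal hypothetical usage sketch (not part of this patch) may help; the exact BufFileCreateTemp()/BufFileWrite()/BufFileRead()/BufFileSeek() signatures are assumptions about the contemporaneous buffile.c API:

	#include "storage/buffile.h"

	/* Spill a few bytes to a temporary BufFile, then read them back. */
	BufFile    *bf = BufFileCreateTemp();	/* released at transaction end */
	char		payload[] = "scratch data";
	char		readback[sizeof(payload)];

	if (BufFileWrite(bf, payload, sizeof(payload)) != sizeof(payload))
		elog(ERROR, "could not write to temporary BufFile");

	if (BufFileSeek(bf, 0, 0L, SEEK_SET) != 0)	/* rewind: segment 0, offset 0 */
		elog(ERROR, "could not seek in temporary BufFile");

	if (BufFileRead(bf, readback, sizeof(payload)) != sizeof(payload))
		elog(ERROR, "could not read from temporary BufFile");

	BufFileClose(bf);		/* or simply let transaction abort clean it up */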
break;
#endif
default:
- elog(ERROR, "BufFileSeek: invalid whence: %d", whence);
+ elog(ERROR, "invalid whence: %d", whence);
return EOF;
}
while (newOffset < 0)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.98 2003/04/29 03:21:29 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/file/fd.c,v 1.99 2003/07/24 22:04:09 tgl Exp $
*
* NOTES:
*
* This is exported for use by places that really want a plain kernel FD,
* but need to be proof against running out of FDs. Once an FD has been
* successfully returned, it is the caller's responsibility to ensure that
- * it will not be leaked on elog()! Most users should *not* call this
+ * it will not be leaked on ereport()! Most users should *not* call this
* routine directly, but instead use the VFD abstraction level, which
* provides protection against descriptor leaks as well as management of
* files that need to be open for more than a short period of time.
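To illustrate that obligation, here is a hypothetical caller-side sketch (not part of this patch); "path" is a stand-in name:

	int		fd;

	fd = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
	if (fd < 0)
		ereport(ERROR,
				(errcode(ERRCODE_IO_ERROR),
				 errmsg("could not open \"%s\": %m", path)));

	/* ... use fd, taking care not to ereport() while it is still open ... */

	close(fd);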
{
int save_errno = errno;
- DO_DB(elog(LOG, "BasicOpenFile: not enough descs, retry, er= %d",
- errno));
+ ereport(LOG,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto tryAgain;
#else
no_files = (long) max_files_per_process;
#endif
- elog(LOG, "pg_nofile: sysconf(_SC_OPEN_MAX) failed; using %ld",
+ elog(LOG, "sysconf(_SC_OPEN_MAX) failed; using %ld",
no_files);
}
#else /* !HAVE_SYSCONF */
* Make sure we have enough to get by after reserving some for LD.
*/
if ((no_files - RESERVE_FOR_LD) < FD_MINFREE)
- elog(FATAL, "pg_nofile: insufficient file descriptors available to start backend.\n"
- "\tSystem allows %ld, we need at least %d.",
- no_files, RESERVE_FOR_LD + FD_MINFREE);
+ ereport(FATAL,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("insufficient file descriptors available to start backend"),
+ errdetail("System allows %ld, we need at least %d.",
+ no_files, RESERVE_FOR_LD + FD_MINFREE)));
no_files -= RESERVE_FOR_LD;
}
/* close the file */
if (close(vfdP->fd))
- elog(LOG, "LruDelete: failed to close %s: %m",
+ elog(LOG, "failed to close \"%s\": %m",
vfdP->fileName);
--nfile;
/* initialize header entry first time through */
VfdCache = (Vfd *) malloc(sizeof(Vfd));
if (VfdCache == NULL)
- elog(FATAL, "AllocateVfd: no room for VFD array");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
MemSet((char *) &(VfdCache[0]), 0, sizeof(Vfd));
VfdCache->fd = VFD_CLOSED;
newCacheSize = 32;
/*
- * Be careful not to clobber VfdCache ptr if realloc fails; we
- * will need it during proc_exit cleanup!
+ * Be careful not to clobber VfdCache ptr if realloc fails.
*/
newVfdCache = (Vfd *) realloc(VfdCache, sizeof(Vfd) * newCacheSize);
if (newVfdCache == NULL)
- elog(FATAL, "AllocateVfd: no room to enlarge VFD array");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
VfdCache = newVfdCache;
/*
int fileFlags,
int fileMode)
{
+ char *fnamecopy;
File file;
Vfd *vfdP;
- if (fileName == NULL)
- elog(ERROR, "fileNameOpenFile: NULL fname");
-
DO_DB(elog(LOG, "fileNameOpenFile: %s %x %o",
fileName, fileFlags, fileMode));
+ /*
+ * We need a malloc'd copy of the file name; fail cleanly if no room.
+ */
+ fnamecopy = strdup(fileName);
+ if (fnamecopy == NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
+
file = AllocateVfd();
vfdP = &VfdCache[file];
if (vfdP->fd < 0)
{
FreeVfd(file);
+ free(fnamecopy);
return -1;
}
++nfile;
Insert(file);
- vfdP->fileName = (char *) malloc(strlen(fileName) + 1);
- if (vfdP->fileName == NULL)
- elog(FATAL, "fileNameOpenFile: no room to save VFD filename");
- strcpy(vfdP->fileName, fileName);
-
+ vfdP->fileName = fnamecopy;
/* Saved flags are adjusted to be OK for re-opening file */
vfdP->fileFlags = fileFlags & ~(O_CREAT | O_TRUNC | O_EXCL);
vfdP->fileMode = fileMode;
O_RDWR | O_CREAT | O_TRUNC | PG_BINARY,
0600);
if (file <= 0)
- elog(ERROR, "Failed to create temporary file %s: %m",
+ elog(ERROR, "could not create temporary file \"%s\": %m",
tempfilepath);
}
/* close the file */
if (close(vfdP->fd))
- elog(LOG, "FileClose: failed to close %s: %m",
+ elog(LOG, "failed to close \"%s\": %m",
vfdP->fileName);
--nfile;
/* reset flag so that die() interrupt won't cause problems */
vfdP->fdstate &= ~FD_TEMPORARY;
if (unlink(vfdP->fileName))
- elog(LOG, "FileClose: failed to unlink %s: %m",
+ elog(LOG, "failed to unlink \"%s\": %m",
vfdP->fileName);
}
{
case SEEK_SET:
if (offset < 0)
- elog(ERROR, "FileSeek: invalid offset: %ld", offset);
+ elog(ERROR, "invalid seek offset: %ld", offset);
VfdCache[file].seekPos = offset;
break;
case SEEK_CUR:
VfdCache[file].seekPos = lseek(VfdCache[file].fd, offset, whence);
break;
default:
- elog(ERROR, "FileSeek: invalid whence: %d", whence);
+ elog(ERROR, "invalid whence: %d", whence);
break;
}
}
{
case SEEK_SET:
if (offset < 0)
- elog(ERROR, "FileSeek: invalid offset: %ld", offset);
+ elog(ERROR, "invalid seek offset: %ld", offset);
if (VfdCache[file].seekPos != offset)
VfdCache[file].seekPos = lseek(VfdCache[file].fd, offset, whence);
break;
VfdCache[file].seekPos = lseek(VfdCache[file].fd, offset, whence);
break;
default:
- elog(ERROR, "FileSeek: invalid whence: %d", whence);
+ elog(ERROR, "invalid whence: %d", whence);
break;
}
}
*
* fd.c will automatically close all files opened with AllocateFile at
* transaction commit or abort; this prevents FD leakage if a routine
- * that calls AllocateFile is terminated prematurely by elog(ERROR).
+ * that calls AllocateFile is terminated prematurely by ereport(ERROR).
*
* Ideally this should be the *only* direct call of fopen() in the backend.
*/
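For comparison, a hypothetical sketch (not part of this patch) of the leak-proof pattern; "filename" is a stand-in:

	FILE	   *fp;
	char		buf[1024];

	fp = AllocateFile(filename, "r");	/* registered for cleanup at abort */
	if (fp == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_IO_ERROR),
				 errmsg("could not open \"%s\": %m", filename)));

	while (fgets(buf, sizeof(buf), fp) != NULL)
	{
		/* process one line ... */
	}

	FreeFile(fp);		/* normal-path close; abort would have done it for us */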
DO_DB(elog(LOG, "AllocateFile: Allocated %d", numAllocatedFiles));
if (numAllocatedFiles >= MAX_ALLOCATED_FILES)
- elog(ERROR, "AllocateFile: too many private FDs demanded");
+ elog(ERROR, "too many private FDs demanded");
TryAgain:
if ((file = fopen(name, mode)) != NULL)
{
int save_errno = errno;
- DO_DB(elog(LOG, "AllocateFile: not enough descs, retry, er= %d",
- errno));
+ ereport(LOG,
+ (errcode(ERRCODE_INSUFFICIENT_RESOURCES),
+ errmsg("out of file descriptors: %m; release and retry")));
errno = 0;
if (ReleaseLruFile())
goto TryAgain;
}
}
if (i < 0)
- elog(WARNING, "FreeFile: file was not obtained from AllocateFile");
+ elog(WARNING, "file passed to FreeFile was not obtained from AllocateFile");
fclose(file);
}
unlink(rm_path);
else
elog(LOG,
- "Unexpected file found in temporary-files directory: %s",
+ "unexpected file found in temporary-files directory: \"%s\"",
rm_path);
}
closedir(temp_dir);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.17 2003/03/06 00:04:27 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/freespace/freespace.c,v 1.18 2003/07/24 22:04:09 tgl Exp $
*
*
* NOTES:
/* Create table header */
FreeSpaceMap = (FSMHeader *) ShmemAlloc(sizeof(FSMHeader));
if (FreeSpaceMap == NULL)
- elog(FATAL, "Insufficient shared memory for free space map");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("insufficient shared memory for free space map")));
MemSet(FreeSpaceMap, 0, sizeof(FSMHeader));
/* Create hashtable for FSMRelations */
(HASH_ELEM | HASH_FUNCTION));
if (!FreeSpaceMap->relHash)
- elog(FATAL, "Insufficient shared memory for free space map");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("insufficient shared memory for free space map")));
/* Allocate page-storage arena */
nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
/* This check ensures spareChunks will be greater than zero */
if (nchunks <= MaxFSMRelations)
- elog(FATAL, "max_fsm_pages must exceed max_fsm_relations * %d",
- CHUNKPAGES);
+ ereport(FATAL,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("max_fsm_pages must exceed max_fsm_relations * %d",
+ CHUNKPAGES)));
FreeSpaceMap->arena = (char *) ShmemAlloc(nchunks * CHUNKBYTES);
if (FreeSpaceMap->arena == NULL)
- elog(FATAL, "Insufficient shared memory for free space map");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("insufficient shared memory for free space map")));
FreeSpaceMap->totalChunks = nchunks;
FreeSpaceMap->usedChunks = 0;
nchunks = (MaxFSMPages - 1) / CHUNKPAGES + 1;
if (nchunks >= (INT_MAX / CHUNKBYTES))
- elog(FATAL, "max_fsm_pages is too large");
+ ereport(FATAL,
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("max_fsm_pages is too large")));
size += MAXALIGN(nchunks * CHUNKBYTES);
/* Check caller provides sorted data */
if (i > 0 && page <= pageSpaces[i-1].blkno)
- elog(ERROR, "RecordRelationFreeSpace: data not in page order");
+ elog(ERROR, "free-space data is not in page order");
FSMPageSetPageNum(newLocation, page);
FSMPageSetSpace(newLocation, avail);
newLocation++;
/* Check caller provides sorted data */
if (i > 0 && page <= pages[i-1])
- elog(ERROR, "RecordIndexFreeSpace: data not in page order");
+ elog(ERROR, "free-space data is not in page order");
IndexFSMPageSetPageNum(newLocation, page);
newLocation++;
}
/*
* PrintFreeSpaceMapStatistics - print statistics about FSM contents
*
- * The info is sent to elog() with the specified message level. This is
+ * The info is sent to ereport() with the specified message level. This is
* intended for use during VACUUM.
*/
void
/* Convert stats to actual number of page slots needed */
needed = (sumRequests + numRels) * CHUNKPAGES;
- elog(elevel, "Free space map: %d relations, %d pages stored; %.0f total pages needed."
- "\n\tAllocated FSM size: %d relations + %d pages = %.0f KB shared mem.",
- numRels, storedPages, needed,
- MaxFSMRelations, MaxFSMPages,
- (double) FreeSpaceShmemSize() / 1024.0);
+ ereport(elevel,
+ (errmsg("free space map: %d relations, %d pages stored; %.0f total pages needed",
+ numRels, storedPages, needed),
+ errdetail("Allocated FSM size: %d relations + %d pages = %.0f KB shared mem.",
+ MaxFSMRelations, MaxFSMPages,
+ (double) FreeSpaceShmemSize() / 1024.0)));
}
/*
fp = AllocateFile(cachefilename, PG_BINARY_W);
if (fp == NULL)
{
- elog(LOG, "Failed to write %s: %m", cachefilename);
+ elog(LOG, "could not write \"%s\": %m", cachefilename);
return;
}
return;
write_failed:
- elog(LOG, "Failed to write %s: %m", cachefilename);
+ elog(LOG, "could not write \"%s\": %m", cachefilename);
/* Clean up */
LWLockRelease(FreeSpaceLock);
if (fp == NULL)
{
if (errno != ENOENT)
- elog(LOG, "Failed to read %s: %m", cachefilename);
+ elog(LOG, "could not read \"%s\": %m", cachefilename);
return;
}
header.version != FSM_CACHE_VERSION ||
header.numRels < 0)
{
- elog(LOG, "Bogus file header in %s", cachefilename);
+ elog(LOG, "bogus file header in \"%s\"", cachefilename);
goto read_failed;
}
relheader.lastPageCount < 0 ||
relheader.storedPages < 0)
{
- elog(LOG, "Bogus rel header in %s", cachefilename);
+ elog(LOG, "bogus rel header in \"%s\"", cachefilename);
goto read_failed;
}
data = (char *) palloc(len + 1); /* +1 to avoid palloc(0) */
if (fread(data, 1, len, fp) != len)
{
- elog(LOG, "Premature EOF in %s", cachefilename);
+ elog(LOG, "premature EOF in \"%s\"", cachefilename);
pfree(data);
goto read_failed;
}
HASH_ENTER,
&found);
if (!fsmrel)
- elog(ERROR, "FreeSpaceMap hashtable out of memory");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of shared memory")));
if (!found)
{
pageIndex; /* current page index */
if (fsmrel->isIndex)
- elog(ERROR, "find_free_space: called for an index relation");
+ elog(ERROR, "find_free_space called for an index relation");
info = (FSMPageData *)
(FreeSpaceMap->arena + fsmrel->firstChunk * CHUNKBYTES);
pageIndex = fsmrel->nextPage;
{
if (fsmrel->storedPages == 0)
return InvalidBlockNumber;
- elog(ERROR, "find_index_free_space: called for a non-index relation");
+ elog(ERROR, "find_index_free_space called for a non-index relation");
}
/*
* For indexes, there's no need for the nextPage state variable; we just
int pageIndex;
if (fsmrel->isIndex)
- elog(ERROR, "fsm_record_free_space: called for an index relation");
+ elog(ERROR, "fsm_record_free_space called for an index relation");
if (lookup_fsm_page_entry(fsmrel, page, &pageIndex))
{
/* Found an existing entry for page; update it */
int limitChunkIndex;
if (newAllocPages < fsmrel->storedPages)
- elog(PANIC, "compact_fsm_storage: can't juggle and compress too");
+ elog(PANIC, "can't juggle and compress too");
if (fsmrel->nextPhysical != NULL)
limitChunkIndex = fsmrel->nextPhysical->firstChunk;
else
else
limitChunkIndex = FreeSpaceMap->totalChunks;
if (newChunkIndex + curChunks > limitChunkIndex)
- elog(PANIC, "compact_fsm_storage: insufficient room");
+ elog(PANIC, "insufficient room");
}
memmove(newLocation, oldLocation, curChunks * CHUNKBYTES);
}
if (newChunkIndex < oldChunkIndex)
{
/* trouble... */
- elog(PANIC, "push_fsm_rels_after: out of room");
+ elog(PANIC, "out of room");
}
else if (newChunkIndex > oldChunkIndex)
{
Size avail = pageSpaces[i].avail;
if (avail >= BLCKSZ)
- elog(ERROR, "pack_incoming_pages: bogus freespace amount");
+ elog(ERROR, "bogus freespace amount");
avail /= (BLCKSZ/HISTOGRAM_BINS);
histogram[avail]++;
}
/* Check caller provides sorted data */
if (i > 0 && page <= pageSpaces[i-1].blkno)
- elog(ERROR, "RecordIndexFreeSpace: data not in page order");
+ elog(ERROR, "free-space data is not in page order");
/* Save this page? */
if (avail >= thresholdU ||
(avail >= thresholdL && (--binct >= 0)))
/* Shouldn't happen, but test to protect against stack clobber */
if (avail >= BLCKSZ)
- elog(ERROR, "pack_existing_pages: bogus freespace amount");
+ elog(ERROR, "bogus freespace amount");
avail /= (BLCKSZ/HISTOGRAM_BINS);
histogram[avail]++;
}
-$Header: /cvsroot/pgsql/src/backend/storage/ipc/README,v 1.2 2002/09/20 03:53:55 momjian Exp $
+$Header: /cvsroot/pgsql/src/backend/storage/ipc/README,v 1.3 2003/07/24 22:04:09 tgl Exp $
Mon Jul 18 11:09:22 PDT 1988 W.KLAS
Cache invalidation synchronization routines:
all backends. A message read by all backends is removed from the
queue automatically. If a message has been lost because the buffer
was full, all backends that haven't read this message will be
-noticed that they have to reset their cache state. This is done
+told that they have to reset their cache state. This is done
at the time when they try to read the message queue.
The message queue is implemented as a shared buffer segment. Actually,
the queue is circular, to allow fast insertion, reading (of invalidation data),
and maintenance of the buffer.
-
-Access to this shared message buffer is synchronized by the lock manager.
-The lock manager treats the buffer as a regular relation and sets
-relation level locks (with mode = LockWait) to block backends while
-another backend is writing or reading the buffer. The identifiers used
-for this special 'relation' are database id = 0 and relation id = 0.
-
-The current implementation prints regular (e)log information
-when a message has been removed from the buffer because the buffer
-is full, and a backend has to reset its cache state. The elog level
-is NOTICE. This can be used to improve the behavior of backends
-when invalidating or resetting their cache state.
-
-
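To make the reader side of this mechanism concrete, here is a hypothetical sketch (not part of this patch); the callback names are invented and the ReceiveSharedInvalidMessages() signature is an assumption about the contemporaneous sinval API:

	#include "storage/sinval.h"

	static void
	my_invalidation_cb(SharedInvalidationMessage *msg)
	{
		/* drop whatever local cache entry msg describes */
	}

	static void
	my_reset_cb(void)
	{
		/* the queue overflowed before we read a message: reset all caches */
	}

	/* typically called at transaction start and other safe points */
	ReceiveSharedInvalidMessages(my_invalidation_cb, my_reset_cb);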
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.82 2003/05/27 17:49:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipc.c,v 1.83 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * This flag is set during proc_exit() to change elog()'s behavior,
- * so that an elog() from an on_proc_exit routine cannot get us out
+ * This flag is set during proc_exit() to change ereport()'s behavior,
+ * so that an ereport() from an on_proc_exit routine cannot get us out
* of the exit procedure. We do NOT want to go back to the idle loop...
*/
bool proc_exit_inprogress = false;
proc_exit(int code)
{
/*
- * Once we set this flag, we are committed to exit. Any elog() will
+ * Once we set this flag, we are committed to exit. Any ereport() will
* NOT send control back to the main loop, but right back here.
*/
proc_exit_inprogress = true;
* call all the callbacks registered before calling exit().
*
* Note that since we decrement on_proc_exit_index each time, if a
- * callback calls elog(ERROR) or elog(FATAL) then it won't be invoked
- * again when control comes back here (nor will the
+ * callback calls ereport(ERROR) or ereport(FATAL) then it won't be
+ * invoked again when control comes back here (nor will the
* previously-completed callbacks). So, an infinite loop should not
* be possible.
*/
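As a hypothetical illustration (not part of this patch) of registering such a callback; my_cleanup and my_state are invented names:

	static void
	my_cleanup(int code, Datum arg)
	{
		/* release whatever my_state holds; runs once during proc_exit() */
	}

	/* somewhere during backend startup: */
	on_proc_exit(my_cleanup, PointerGetDatum(my_state));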
on_proc_exit(void (*function) (), Datum arg)
{
if (on_proc_exit_index >= MAX_ON_EXITS)
- elog(FATAL, "Out of on_proc_exit slots");
+ ereport(FATAL,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg_internal("out of on_proc_exit slots")));
on_proc_exit_list[on_proc_exit_index].function = function;
on_proc_exit_list[on_proc_exit_index].arg = arg;
on_shmem_exit(void (*function) (), Datum arg)
{
if (on_shmem_exit_index >= MAX_ON_EXITS)
- elog(FATAL, "Out of on_shmem_exit slots");
+ ereport(FATAL,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg_internal("out of on_shmem_exit slots")));
on_shmem_exit_list[on_shmem_exit_index].function = function;
on_shmem_exit_list[on_shmem_exit_index].arg = arg;
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.53 2003/05/27 17:49:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/ipci.c,v 1.54 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
*/
InitLocks();
if (InitLockTable(maxBackends) == INVALID_TABLEID)
- elog(FATAL, "Couldn't create the lock table");
+ elog(FATAL, "could not create the lock table");
/*
* Set up process table
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.68 2003/05/06 23:34:55 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/shmem.c,v 1.69 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
SpinLockRelease(ShmemLock);
if (!newSpace)
- elog(WARNING, "ShmemAlloc: out of memory");
+ ereport(WARNING,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
return newSpace;
}
SHMEM_INDEX_SIZE, SHMEM_INDEX_SIZE,
&info, hash_flags);
if (!ShmemIndex)
- elog(FATAL, "InitShmemIndex: couldn't initialize Shmem Index");
+ elog(FATAL, "could not initialize Shmem Index");
/*
* Now, create an entry in the hashtable for the index itself.
result = (ShmemIndexEnt *)
hash_search(ShmemIndex, (void *) &item, HASH_ENTER, &found);
if (!result)
- elog(FATAL, "InitShmemIndex: Shmem Index out of memory");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
Assert(ShmemBootstrap && !found);
if (!result)
{
LWLockRelease(ShmemIndexLock);
- elog(ERROR, "ShmemInitStruct: Shmem Index out of memory");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
return NULL;
}
{
LWLockRelease(ShmemIndexLock);
- elog(WARNING, "ShmemInitStruct: ShmemIndex entry size is wrong");
+ elog(WARNING, "ShmemIndex entry size is wrong");
/* let caller print its message too */
return NULL;
}
hash_search(ShmemIndex, (void *) &item, HASH_REMOVE, NULL);
LWLockRelease(ShmemIndexLock);
- elog(WARNING, "ShmemInitStruct: cannot allocate '%s'", name);
+ ereport(WARNING,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("could not allocate \"%s\"", name)));
*foundPtr = FALSE;
return NULL;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.56 2003/06/12 01:42:19 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinval.c,v 1.57 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
flag = SIBackendInit(shmInvalBuffer);
LWLockRelease(SInvalLock);
if (flag < 0) /* unexpected problem */
- elog(FATAL, "Backend cache invalidation initialization failed");
+ elog(FATAL, "shared cache invalidation initialization failed");
if (flag == 0) /* expected problem: MaxBackends exceeded */
- elog(FATAL, "Sorry, too many clients already");
+ ereport(FATAL,
+ (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
+ errmsg("sorry, too many clients already")));
}
/*
insertOK = SIInsertDataEntry(shmInvalBuffer, msg);
LWLockRelease(SInvalLock);
if (!insertOK)
- elog(DEBUG4, "SendSharedInvalidMessage: SI buffer overflow");
+ elog(DEBUG4, "SI buffer overflow");
}
/*
if (getResult < 0)
{
/* got a reset message */
- elog(DEBUG4, "ReceiveSharedInvalidMessages: cache state reset");
+ elog(DEBUG4, "cache state reset");
resetFunction();
}
else
snapshot->xip = (TransactionId *)
malloc(MaxBackends * sizeof(TransactionId));
if (snapshot->xip == NULL)
- elog(ERROR, "Memory exhausted in GetSnapshotData");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
}
globalxmin = xmin = GetCurrentTransactionId();
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.50 2003/05/27 17:49:46 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.51 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
MyBackendId = (stateP - &segP->procState[0]) + 1;
#ifdef INVALIDDEBUG
- elog(DEBUG2, "SIBackendInit: backend id %d", MyBackendId);
+ elog(DEBUG2, "my backend id is %d", MyBackendId);
#endif /* INVALIDDEBUG */
/* Reduce free slot count */
if (numMsgs == (MAXNUMMESSAGES * 70 / 100) &&
IsUnderPostmaster)
{
- elog(DEBUG4, "SIInsertDataEntry: table is 70%% full, signaling postmaster");
+ elog(DEBUG4, "SI table is 70%% full, signaling postmaster");
SendPostmasterSignal(PMSIGNAL_WAKEN_CHILDREN);
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.96 2002/09/02 02:47:03 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/large_object/inv_api.c,v 1.97 2003/07/24 22:04:09 tgl Exp $
*
*-------------------------------------------------------------------------
*/
{
Assert(!VARATT_IS_EXTENDED(data));
if (VARSIZE(data) < VARHDRSZ)
- elog(ERROR, "getbytealen: VARSIZE(data) < VARHDRSZ. This is internal error.");
+ elog(ERROR, "invalid VARSIZE(data)");
return (VARSIZE(data) - VARHDRSZ);
}
/* Check for duplicate (shouldn't happen) */
if (LargeObjectExists(file_oid))
- elog(ERROR, "inv_create: large object %u already exists. This is internal error.", file_oid);
+ elog(ERROR, "large object %u already exists", file_oid);
/*
* Create the LO by writing an empty first page for it in
retval->heap_r = heap_openr(LargeObjectRelationName, AccessShareLock);
}
else
- elog(ERROR, "inv_create: invalid flags: %d", flags);
+ elog(ERROR, "invalid flags: %d", flags);
retval->index_r = index_openr(LargeObjectLOidPNIndex);
LargeObjectDesc *retval;
if (!LargeObjectExists(lobjId))
- elog(ERROR, "inv_open: large object %u not found", lobjId);
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("large object %u does not exist", lobjId)));
retval = (LargeObjectDesc *) palloc(sizeof(LargeObjectDesc));
retval->heap_r = heap_openr(LargeObjectRelationName, AccessShareLock);
}
else
- elog(ERROR, "inv_open: invalid flags: %d", flags);
+ elog(ERROR, "invalid flags: %d", flags);
retval->index_r = index_openr(LargeObjectLOidPNIndex);
index_endscan(sd);
if (!found)
- elog(ERROR, "inv_getsize: large object %u not found", obj_desc->id);
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("large object %u does not exist", obj_desc->id)));
return lastbyte;
}
{
case SEEK_SET:
if (offset < 0)
- elog(ERROR, "inv_seek: invalid offset: %d", offset);
+ elog(ERROR, "invalid seek offset: %d", offset);
obj_desc->offset = offset;
break;
case SEEK_CUR:
if (offset < 0 && obj_desc->offset < ((uint32) (-offset)))
- elog(ERROR, "inv_seek: invalid offset: %d", offset);
+ elog(ERROR, "invalid seek offset: %d", offset);
obj_desc->offset += offset;
break;
case SEEK_END:
uint32 size = inv_getsize(obj_desc);
if (offset < 0 && size < ((uint32) (-offset)))
- elog(ERROR, "inv_seek: invalid offset: %d", offset);
+ elog(ERROR, "invalid seek offset: %d", offset);
obj_desc->offset = size + offset;
}
break;
default:
- elog(ERROR, "inv_seek: invalid whence: %d", whence);
+ elog(ERROR, "invalid whence: %d", whence);
}
return obj_desc->offset;
}
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.20 2003/03/31 20:32:29 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/deadlock.c,v 1.21 2003/07/24 22:04:13 tgl Exp $
*
* Interface:
*
*/
#include "postgres.h"
+#include "lib/stringinfo.h"
#include "miscadmin.h"
#include "storage/proc.h"
#include "utils/memutils.h"
nWaitOrders = 0;
if (!FindLockCycle(proc, possibleConstraints, &nSoftEdges))
- elog(FATAL, "DeadLockCheck: deadlock seems to have disappeared");
+ elog(FATAL, "deadlock seems to have disappeared");
return true; /* cannot find a non-deadlocked state */
}
{
/* Regenerate the list of possible added constraints */
if (nEdges != TestConfiguration(proc))
- elog(FATAL, "DeadLockCheckRecurse: inconsistent results");
+ elog(FATAL, "inconsistent results during deadlock check");
}
curConstraints[nCurConstraints] =
possibleConstraints[oldPossibleConstraints + i];
#endif
/*
- * Report details about a detected deadlock.
+ * Report a detected deadlock, with available details.
*/
void
DeadLockReport(void)
{
+ StringInfoData buf;
int i;
+ initStringInfo(&buf);
for (i = 0; i < nDeadlockDetails; i++)
{
DEADLOCK_INFO *info = &deadlockDetails[i];
else
nextpid = deadlockDetails[0].pid;
+ if (i > 0)
+ appendStringInfoChar(&buf, '\n');
+
if (info->locktag.relId == XactLockTableId && info->locktag.dbId == 0)
{
/* Lock is for transaction ID */
- elog(NOTICE, "Proc %d waits for %s on transaction %u; blocked by %d",
- info->pid,
- GetLockmodeName(info->lockmode),
- info->locktag.objId.xid,
- nextpid);
+ appendStringInfo(&buf,
+ gettext("Proc %d waits for %s on transaction %u; blocked by proc %d."),
+ info->pid,
+ GetLockmodeName(info->lockmode),
+ info->locktag.objId.xid,
+ nextpid);
}
else
{
/* Lock is for a relation */
- elog(NOTICE, "Proc %d waits for %s on relation %u database %u; blocked by %d",
- info->pid,
- GetLockmodeName(info->lockmode),
- info->locktag.relId,
- info->locktag.dbId,
- nextpid);
+ appendStringInfo(&buf,
+ gettext("Proc %d waits for %s on relation %u of database %u; blocked by proc %d."),
+ info->pid,
+ GetLockmodeName(info->lockmode),
+ info->locktag.relId,
+ info->locktag.dbId,
+ nextpid);
}
}
+ ereport(ERROR,
+ (errcode(ERRCODE_T_R_DEADLOCK_DETECTED),
+ errmsg("deadlock detected"),
+ errdetail("%s", buf.data)));
}
/*
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.56 2003/02/19 23:41:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lmgr.c,v 1.57 2003/07/24 22:04:13 tgl Exp $
*
*-------------------------------------------------------------------------
*/
LockTableId = lockmethod;
if (!(LockTableId))
- elog(ERROR, "InitLockTable: couldn't initialize lock table");
+ elog(ERROR, "could not initialize lock table");
#ifdef USER_LOCKS
*/
LongTermTableId = LockMethodTableRename(LockTableId);
if (!(LongTermTableId))
- elog(ERROR, "InitLockTable: couldn't rename long-term lock table");
+ elog(ERROR, "could not rename long-term lock table");
#endif
return LockTableId;
if (!LockAcquire(LockTableId, &tag, GetCurrentTransactionId(),
lockmode, false))
- elog(ERROR, "LockRelation: LockAcquire failed");
+ elog(ERROR, "LockAcquire failed");
/*
* Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or elog() trying.
+ * were waiting to lock it. If so, rebuild it, or ereport() trying.
* Increment the refcount to ensure that RelationFlushRelation will
* rebuild it and not just delete it.
*/
/*
* Check to see if the relcache entry has been invalidated while we
- * were waiting to lock it. If so, rebuild it, or elog() trying.
+ * were waiting to lock it. If so, rebuild it, or ereport() trying.
* Increment the refcount to ensure that RelationFlushRelation will
* rebuild it and not just delete it.
*/
*
* This routine grabs a session-level lock on the target relation. The
* session lock persists across transaction boundaries. It will be removed
- * when UnlockRelationForSession() is called, or if an elog(ERROR) occurs,
+ * when UnlockRelationForSession() is called, or if an ereport(ERROR) occurs,
* or if the backend exits.
*
* Note that one should also grab a transaction-level lock on the rel
if (!LockAcquire(LockTableId, &tag, InvalidTransactionId,
lockmode, false))
- elog(ERROR, "LockRelationForSession: LockAcquire failed");
+ elog(ERROR, "LockAcquire failed");
}
/*
if (!LockAcquire(LockTableId, &tag, GetCurrentTransactionId(),
lockmode, false))
- elog(ERROR, "LockPage: LockAcquire failed");
+ elog(ERROR, "LockAcquire failed");
}
/*
if (!LockAcquire(LockTableId, &tag, xid,
ExclusiveLock, false))
- elog(ERROR, "XactLockTableInsert: LockAcquire failed");
+ elog(ERROR, "LockAcquire failed");
}
/*
if (!LockAcquire(LockTableId, &tag, myxid,
ShareLock, false))
- elog(ERROR, "XactLockTableWait: LockAcquire failed");
+ elog(ERROR, "LockAcquire failed");
LockRelease(LockTableId, &tag, myxid, ShareLock);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.122 2003/02/19 23:41:15 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lock.c,v 1.123 2003/07/24 22:04:14 tgl Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
if (numModes >= MAX_LOCKMODES)
{
- elog(WARNING, "LockMethodTableInit: too many lock types %d greater than %d",
+ elog(WARNING, "too many lock types %d (limit is %d)",
numModes, MAX_LOCKMODES);
return INVALID_LOCKMETHOD;
}
ShmemInitStruct(shmemName, sizeof(LOCKMETHODTABLE), &found);
if (!lockMethodTable)
- elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
+ elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/*
* Lock the LWLock for the table (probably not necessary here)
hash_flags);
if (!lockMethodTable->lockHash)
- elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
+ elog(FATAL, "could not initialize lock table \"%s\"", tabName);
Assert(lockMethodTable->lockHash->hash == tag_hash);
/*
hash_flags);
if (!lockMethodTable->proclockHash)
- elog(FATAL, "LockMethodTableInit: couldn't initialize %s", tabName);
+ elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/* init data structures */
LockMethodInit(lockMethodTable, conflictsP, numModes);
* Returns: TRUE if lock was acquired, FALSE otherwise. Note that
* a FALSE return is to be expected if dontWait is TRUE;
* but if dontWait is FALSE, only a parameter error can cause
- * a FALSE return. (XXX probably we should just elog on parameter
+ * a FALSE return. (XXX probably we should just ereport on parameter
* errors, instead of conflating this with failure to acquire lock?)
*
* Side Effects: The lock is acquired and recorded in lock tables.
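A hypothetical caller-side sketch (not part of this patch), following the call pattern the lmgr.c hunks above use; the LOCKTAG setup is elided:

	/* Try to take the lock without blocking; FALSE here just means "busy". */
	if (!LockAcquire(LockTableId, &tag, GetCurrentTransactionId(),
					 ExclusiveLock, true /* dontWait */ ))
	{
		/* a conflicting lock is held; fall back, retry later, or give up */
	}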
lockMethodTable = LockMethodTable[lockmethod];
if (!lockMethodTable)
{
- elog(WARNING, "LockAcquire: bad lock table %d", lockmethod);
+ elog(WARNING, "bad lock table id: %d", lockmethod);
return FALSE;
}
if (!lock)
{
LWLockRelease(masterLock);
- elog(ERROR, "LockAcquire: lock table %d is out of memory",
- lockmethod);
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
return FALSE;
}
if (!proclock)
{
LWLockRelease(masterLock);
- elog(ERROR, "LockAcquire: proclock table out of memory");
+ ereport(ERROR,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
return FALSE;
}
{
if (i >= (int) lockmode)
break; /* safe: we have a lock >= req level */
- elog(LOG, "Deadlock risk: raising lock level"
+ elog(LOG, "deadlock risk: raising lock level"
" from %s to %s on object %u/%u/%u",
lock_mode_names[i], lock_mode_names[lockmode],
lock->tag.relId, lock->tag.dbId, lock->tag.objId.blkno);
(void *) proclock,
HASH_REMOVE, NULL);
if (!proclock)
- elog(WARNING, "LockAcquire: remove proclock, table corrupted");
+ elog(WARNING, "proclock table corrupted");
}
else
PROCLOCK_PRINT("LockAcquire: NHOLDING", proclock);
LOCK_PRINT("WaitOnLock: aborting on lock", lock, lockmode);
LWLockRelease(lockMethodTable->masterLock);
/*
- * Now that we aren't holding the LockMgrLock, print details about
- * the detected deadlock. We didn't want to do this before because
- * sending elog messages to the client while holding the shared lock
- * is bad for concurrency.
+ * Now that we aren't holding the LockMgrLock, we can give an error
+ * report including details about the detected deadlock.
*/
DeadLockReport();
- elog(ERROR, "deadlock detected");
/* not reached */
}
/*
* let the caller print its own error message, too. Do not
- * elog(ERROR).
+ * ereport(ERROR).
*/
if (!lock)
{
LWLockRelease(masterLock);
- elog(WARNING, "LockRelease: no such lock");
+ elog(WARNING, "no such lock");
return FALSE;
}
LOCK_PRINT("LockRelease: found", lock, lockmode);
LWLockRelease(masterLock);
#ifdef USER_LOCKS
if (lockmethod == USER_LOCKMETHOD)
- elog(WARNING, "LockRelease: no lock with this tag");
+ elog(WARNING, "no lock with this tag");
else
#endif
- elog(WARNING, "LockRelease: proclock table corrupted");
+ elog(WARNING, "proclock table corrupted");
return FALSE;
}
PROCLOCK_PRINT("LockRelease: found", proclock);
PROCLOCK_PRINT("LockRelease: WRONGTYPE", proclock);
Assert(proclock->holding[lockmode] >= 0);
LWLockRelease(masterLock);
- elog(WARNING, "LockRelease: you don't own a lock of type %s",
+ elog(WARNING, "you don't own a lock of type %s",
lock_mode_names[lockmode]);
return FALSE;
}
if (!lock)
{
LWLockRelease(masterLock);
- elog(WARNING, "LockRelease: remove lock, table corrupted");
+ elog(WARNING, "lock table corrupted");
return FALSE;
}
wakeupNeeded = false; /* should be false, but make sure */
if (!proclock)
{
LWLockRelease(masterLock);
- elog(WARNING, "LockRelease: remove proclock, table corrupted");
+ elog(WARNING, "proclock table corrupted");
return FALSE;
}
}
lockMethodTable = LockMethodTable[lockmethod];
if (!lockMethodTable)
{
- elog(WARNING, "LockReleaseAll: bad lockmethod %d", lockmethod);
+ elog(WARNING, "bad lockmethod %d", lockmethod);
return FALSE;
}
if (!proclock)
{
LWLockRelease(masterLock);
- elog(WARNING, "LockReleaseAll: proclock table corrupted");
+ elog(WARNING, "proclock table corrupted");
return FALSE;
}
if (!lock)
{
LWLockRelease(masterLock);
- elog(WARNING, "LockReleaseAll: cannot remove lock from HTAB");
+ elog(WARNING, "cannot remove lock from HTAB");
return FALSE;
}
}
#ifdef LOCK_DEBUG
if (lockmethod == USER_LOCKMETHOD ? Trace_userlocks : Trace_locks)
- elog(LOG, "LockReleaseAll: done");
+ elog(LOG, "LockReleaseAll done");
#endif
return TRUE;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.15 2003/06/11 22:37:45 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/lwlock.c,v 1.16 2003/07/24 22:04:14 tgl Exp $
*
*-------------------------------------------------------------------------
*/
LWLockAssign(void)
{
if (LWLockCounter[0] >= LWLockCounter[1])
- elog(FATAL, "No more LWLockIds available");
+ elog(FATAL, "no more LWLockIds available");
return (LWLockId) (LWLockCounter[0]++);
}
* shared memory initialization.
*/
if (proc == NULL)
- elog(FATAL, "LWLockAcquire: can't wait without a PGPROC structure");
+ elog(FATAL, "cannot wait without a PGPROC structure");
proc->lwWaiting = true;
proc->lwExclusive = (mode == LW_EXCLUSIVE);
break;
}
if (i < 0)
- elog(ERROR, "LWLockRelease: lock %d is not held", (int) lockid);
+ elog(ERROR, "lock %d is not held", (int) lockid);
num_held_lwlocks--;
for (; i < num_held_lwlocks; i++)
held_lwlocks[i] = held_lwlocks[i + 1];
/*
* LWLockReleaseAll - release all currently-held locks
*
- * Used to clean up after elog(ERROR). An important difference between this
+ * Used to clean up after ereport(ERROR). An important difference between this
* function and retail LWLockRelease calls is that InterruptHoldoffCount is
* unchanged by this operation. This is necessary since InterruptHoldoffCount
- * has been set to an appropriate level earlier in error recovery. We could
+ * has been set to an appropriate level earlier in error recovery. We could
* decrement it below zero if we allow it to drop for each released lock!
*/
void
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.130 2003/05/15 16:35:29 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/lmgr/proc.c,v 1.131 2003/07/24 22:04:14 tgl Exp $
*
*-------------------------------------------------------------------------
*/
proc = (PGPROC *) ShmemAlloc(sizeof(PGPROC));
if (!proc)
- elog(FATAL, "cannot create new proc: out of memory");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
MemSet(proc, 0, sizeof(PGPROC));
PGSemaphoreCreate(&proc->sem);
proc->links.next = ProcGlobal->freeProcs;
*/
DummyProc = (PGPROC *) ShmemAlloc(sizeof(PGPROC));
if (!DummyProc)
- elog(FATAL, "cannot create new proc: out of memory");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
MemSet(DummyProc, 0, sizeof(PGPROC));
DummyProc->pid = 0; /* marks DummyProc as not in use */
PGSemaphoreCreate(&DummyProc->sem);
* we are a backend, we inherit this by fork() from the postmaster).
*/
if (procglobal == NULL)
- elog(PANIC, "InitProcess: Proc Header uninitialized");
+ elog(PANIC, "proc header uninitialized");
if (MyProc != NULL)
- elog(ERROR, "InitProcess: you already exist");
+ elog(ERROR, "you already exist");
/*
* Try to get a proc struct from the free list. If this fails, we
* standard error message.
*/
SpinLockRelease(ProcStructLock);
- elog(FATAL, "Sorry, too many clients already");
+ ereport(FATAL,
+ (errcode(ERRCODE_TOO_MANY_CONNECTIONS),
+ errmsg("sorry, too many clients already")));
}
/*
* inherit this by fork() from the postmaster).
*/
if (ProcGlobal == NULL || DummyProc == NULL)
- elog(PANIC, "InitDummyProcess: Proc Header uninitialized");
+ elog(PANIC, "proc header uninitialized");
if (MyProc != NULL)
- elog(ERROR, "InitDummyProcess: you already exist");
+ elog(ERROR, "you already exist");
/*
* DummyProc should not presently be in use by anyone else
*/
if (DummyProc->pid != 0)
- elog(FATAL, "InitDummyProcess: DummyProc is in use by PID %d",
- DummyProc->pid);
+ elog(FATAL, "DummyProc is in use by PID %d", DummyProc->pid);
MyProc = DummyProc;
/*
* Returns true if we had been waiting for a lock, else false.
*
* (Normally, this would only happen if we accept a cancel/die
- * interrupt while waiting; but an elog(ERROR) while waiting is
+ * interrupt while waiting; but an ereport(ERROR) while waiting is
* within the realm of possibility, too.)
*/
bool
* running the rather expensive deadlock-check code in most cases.
*/
if (!enable_sig_alarm(DeadlockTimeout, false))
- elog(FATAL, "ProcSleep: Unable to set timer for process wakeup");
+ elog(FATAL, "could not set timer for process wakeup");
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
* Disable the timer, if it's still running
*/
if (!disable_sig_alarm(false))
- elog(FATAL, "ProcSleep: Unable to disable timer for process wakeup");
+ elog(FATAL, "could not disable timer for process wakeup");
/*
* Now there is nothing for LockWaitCancel to do.
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.52 2003/03/28 20:17:13 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/page/bufpage.c,v 1.53 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
* If offsetNumber is not valid, then assign one by finding the first
* one that is both unused and deallocated.
*
- * !!! ELOG(ERROR) IS DISALLOWED HERE !!!
+ * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!
*
* ----------------
*/
phdr->pd_lower > phdr->pd_upper ||
phdr->pd_upper > phdr->pd_special ||
phdr->pd_special > BLCKSZ)
- elog(PANIC, "PageAddItem: corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special);
+ ereport(PANIC,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
/*
* Select offsetNumber to place the new item at
if (((*itemId).lp_flags & LP_USED) ||
((*itemId).lp_len != 0))
{
- elog(WARNING, "PageAddItem: tried overwrite of used ItemId");
+ elog(WARNING, "will not overwrite a used ItemId");
return InvalidOffsetNumber;
}
}
if (offsetNumber > limit)
{
- elog(WARNING, "PageAddItem: specified offset after maxoff");
+ elog(WARNING, "specified item offset is too large");
return InvalidOffsetNumber;
}
pd_upper > pd_special ||
pd_special > BLCKSZ ||
pd_special != MAXALIGN(pd_special))
- elog(ERROR, "PageRepairFragmentation: corrupted page pointers: lower = %u, upper = %u, special = %u",
- pd_lower, pd_upper, pd_special);
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
+ pd_lower, pd_upper, pd_special)));
nline = PageGetMaxOffsetNumber(page);
nused = 0;
itemidptr->itemoff = (*lp).lp_off;
if (itemidptr->itemoff < (int) pd_upper ||
itemidptr->itemoff >= (int) pd_special)
- elog(ERROR, "PageRepairFragmentation: corrupted item pointer %u",
- itemidptr->itemoff);
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted item pointer: %u",
+ itemidptr->itemoff)));
itemidptr->alignedlen = MAXALIGN((*lp).lp_len);
totallen += itemidptr->alignedlen;
itemidptr++;
}
if (totallen > (Size) (pd_special - pd_lower))
- elog(ERROR, "PageRepairFragmentation: corrupted item lengths, total %u, avail %u",
- (unsigned int) totallen, pd_special - pd_lower);
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted item lengths: total %u, available space %u",
+ (unsigned int) totallen, pd_special - pd_lower)));
/* sort itemIdSortData array into decreasing itemoff order */
qsort((char *) itemidbase, nused, sizeof(struct itemIdSortData),
phdr->pd_lower > phdr->pd_upper ||
phdr->pd_upper > phdr->pd_special ||
phdr->pd_special > BLCKSZ)
- elog(ERROR, "PageIndexTupleDelete: corrupted page pointers: lower = %u, upper = %u, special = %u",
- phdr->pd_lower, phdr->pd_upper, phdr->pd_special);
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted page pointers: lower = %u, upper = %u, special = %u",
+ phdr->pd_lower, phdr->pd_upper, phdr->pd_special)));
nline = PageGetMaxOffsetNumber(page);
if ((int) offnum <= 0 || (int) offnum > nline)
- elog(ERROR, "PageIndexTupleDelete: bad offnum %u", offnum);
+ elog(ERROR, "invalid index offnum: %u", offnum);
/* change offset number to offset index */
offidx = offnum - 1;
if (offset < phdr->pd_upper || (offset + size) > phdr->pd_special ||
offset != MAXALIGN(offset) || size != MAXALIGN(size))
- elog(ERROR, "PageIndexTupleDelete: corrupted item pointer: offset = %u size = %u",
- offset, (unsigned int) size);
+ ereport(ERROR,
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("corrupted item pointer: offset = %u size = %u",
+ offset, (unsigned int) size)));
/*
* First, we want to get rid of the pd_linp entry for the index tuple.
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.94 2003/01/07 01:19:12 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/md.c,v 1.95 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
seekpos = (long) (BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)));
#ifdef DIAGNOSTIC
if (seekpos >= BLCKSZ * RELSEG_SIZE)
- elog(FATAL, "seekpos too big!");
+ elog(FATAL, "seekpos too big");
#endif
#else
seekpos = (long) (BLCKSZ * (blocknum));
#ifndef LET_OS_MANAGE_FILESIZE
#ifdef DIAGNOSTIC
if (_mdnblocks(v->mdfd_vfd, BLCKSZ) > ((BlockNumber) RELSEG_SIZE))
- elog(FATAL, "segment too big!");
+ elog(FATAL, "segment too big");
#endif
#endif
#ifdef DIAGNOSTIC
if (_mdnblocks(fd, BLCKSZ) > ((BlockNumber) RELSEG_SIZE))
- elog(FATAL, "segment too big on relopen!");
+ elog(FATAL, "segment too big");
#endif
#endif
#ifdef DIAGNOSTIC
if (seekpos >= BLCKSZ * RELSEG_SIZE)
- elog(FATAL, "seekpos too big!");
+ elog(FATAL, "seekpos too big");
#endif
#else
seekpos = (long) (BLCKSZ * (blocknum));
seekpos = (long) (BLCKSZ * (blocknum % ((BlockNumber) RELSEG_SIZE)));
#ifdef DIAGNOSTIC
if (seekpos >= BLCKSZ * RELSEG_SIZE)
- elog(FATAL, "seekpos too big!");
+ elog(FATAL, "seekpos too big");
#endif
#else
seekpos = (long) (BLCKSZ * (blocknum));
seekpos = (long) (BLCKSZ * (blkno % ((BlockNumber) RELSEG_SIZE)));
#ifdef DIAGNOSTIC
if (seekpos >= BLCKSZ * RELSEG_SIZE)
- elog(FATAL, "seekpos too big!");
+ elog(FATAL, "seekpos too big");
#endif
#else
seekpos = (long) (BLCKSZ * (blkno));
errno = 0;
if (lseek(fd, seekpos, SEEK_SET) != seekpos)
{
- elog(LOG, "mdblindwrt: lseek(%ld) failed: %m", seekpos);
+ elog(LOG, "lseek(%ld) failed: %m", seekpos);
close(fd);
return SM_FAIL;
}
/* if write didn't set errno, assume problem is no disk space */
if (errno == 0)
errno = ENOSPC;
- elog(LOG, "mdblindwrt: write() failed: %m");
+ elog(LOG, "write() failed: %m");
status = SM_FAIL;
}
if (close(fd) < 0)
{
- elog(LOG, "mdblindwrt: close() failed: %m");
+ elog(LOG, "close() failed: %m");
status = SM_FAIL;
}
* called, then only segments up to the last one actually touched
* are present in the chain...
*
- * Returns # of blocks, elog's on error.
+ * Returns # of blocks, ereport's on error.
*/
BlockNumber
mdnblocks(Relation reln)
{
nblocks = _mdnblocks(v->mdfd_vfd, BLCKSZ);
if (nblocks > ((BlockNumber) RELSEG_SIZE))
- elog(FATAL, "segment too big in mdnblocks!");
+ elog(FATAL, "segment too big");
if (nblocks < ((BlockNumber) RELSEG_SIZE))
return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
*/
v->mdfd_chain = _mdfd_openseg(reln, segno, O_CREAT);
if (v->mdfd_chain == (MdfdVec *) NULL)
- elog(ERROR, "cannot count blocks for %s -- open failed: %m",
+ elog(ERROR, "could not count blocks for \"%s\": %m",
RelationGetRelationName(reln));
}
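mdnblocks() walks the segment chain, so the total block count is the number of full segments already passed plus the block count of the first non-full segment, which is exactly the return expression above. A hedged sketch of that accounting, with segment_blocks() standing in for _mdnblocks() on each already-opened segment:

/*
 * Sketch, not md.c code: segment_blocks() is a stand-in for _mdnblocks()
 * on segment 'segno', and the chain is assumed to end with a partial
 * (possibly empty) segment, as the comment above describes.
 */
static BlockNumber
mdnblocks_sketch(BlockNumber (*segment_blocks) (BlockNumber segno))
{
	BlockNumber segno;

	for (segno = 0;; segno++)
	{
		BlockNumber nblocks = segment_blocks(segno);

		if (nblocks > ((BlockNumber) RELSEG_SIZE))
			elog(FATAL, "segment too big");
		if (nblocks < ((BlockNumber) RELSEG_SIZE))
			return (segno * ((BlockNumber) RELSEG_SIZE)) + nblocks;
		/* this segment is full, so a later segment must be consulted */
	}
}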
#ifdef DIAGNOSTIC
if (_mdnblocks(fd, BLCKSZ) > ((BlockNumber) RELSEG_SIZE))
- elog(FATAL, "segment too big on openseg!");
+ elog(FATAL, "segment too big");
#endif
#endif
if (fd < 0)
{
if ((fd = mdopen(reln)) < 0)
- elog(ERROR, "_mdfd_getrelnfd: cannot open relation %s: %m",
+ elog(ERROR, "could not open relation \"%s\": %m",
RelationGetRelationName(reln));
reln->rd_fd = fd;
}
v->mdfd_chain = _mdfd_openseg(reln, i, (segno == 1) ? O_CREAT : 0);
if (v->mdfd_chain == (MdfdVec *) NULL)
- elog(ERROR, "cannot open segment %u of relation %s (target block %u): %m",
+ elog(ERROR, "could not open segment %u of relation \"%s\" (target block %u): %m",
i, RelationGetRelationName(reln), blkno);
}
v = v->mdfd_chain;
/* call fd.c to allow other FDs to be closed if needed */
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, 0600);
if (fd < 0)
- elog(LOG, "_mdfd_blind_getseg: couldn't open %s: %m", path);
+ elog(LOG, "could not open \"%s\": %m", path);
pfree(path);
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.32 2002/08/06 02:36:34 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/Attic/mm.c,v 1.33 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (entry == (MMRelHashEntry *) NULL)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "main memory storage mgr hash table out of memory");
+ ereport(FATAL,
+ (errcode(ERRCODE_OUT_OF_MEMORY),
+ errmsg("out of memory")));
}
if (found)
if (entry == (MMHashEntry *) NULL)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "mmunlink: cache hash table corrupted");
+ elog(FATAL, "cache hash table corrupted");
}
MMBlockTags[i].mmct_dbid = (Oid) 0;
MMBlockTags[i].mmct_relid = (Oid) 0;
if (rentry == (MMRelHashEntry *) NULL)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "mmunlink: rel cache hash table corrupted");
+ elog(FATAL, "rel cache hash table corrupted");
}
(*MMCurRelno)--;
if (rentry == (MMRelHashEntry *) NULL)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "mmextend: rel cache hash table corrupt");
+ elog(FATAL, "rel cache hash table corrupted");
}
tag.mmct_blkno = rentry->mmrhe_nblocks;
if (entry == (MMHashEntry *) NULL || found)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "mmextend: cache hash table corrupt");
+ elog(FATAL, "cache hash table corrupted");
}
entry->mmhe_bufno = i;
if (entry == (MMHashEntry *) NULL)
{
LWLockRelease(MMCacheLock);
- elog(FATAL, "mmwrite: hash table missing requested page");
+ elog(FATAL, "cache hash table missing requested page");
}
offset = (entry->mmhe_bufno * BLCKSZ);
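Every mm.c failure path above releases MMCacheLock before reporting, so the shared cache lock is not left held by code that is about to error out. A minimal sketch of that release-then-report discipline, written as if it lived inside mm.c (mm_lookup() is a hypothetical stand-in for the hash_search() probes):

/*
 * Sketch only: mm_lookup() is invented; MMCacheLock and the LWLock calls
 * are the real ones used in the hunks above.
 */
static MMHashEntry *
mm_lookup_or_die(MMCacheTag *tag)
{
	MMHashEntry *entry;

	LWLockAcquire(MMCacheLock, LW_EXCLUSIVE);
	entry = mm_lookup(tag);		/* hypothetical hash probe */
	if (entry == (MMHashEntry *) NULL)
	{
		/* drop the shared lock before the error aborts this code path */
		LWLockRelease(MMCacheLock);
		elog(FATAL, "cache hash table corrupted");
	}
	return entry;				/* caller still holds MMCacheLock */
}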
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.62 2003/03/04 21:51:21 tgl Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgr.c,v 1.63 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (smgrsw[i].smgr_init)
{
if ((*(smgrsw[i].smgr_init)) () == SM_FAIL)
- elog(FATAL, "initialization failed on %s: %m",
+ elog(FATAL, "smgr initialization failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
}
if (smgrsw[i].smgr_shutdown)
{
if ((*(smgrsw[i].smgr_shutdown)) () == SM_FAIL)
- elog(FATAL, "shutdown failed on %s: %m",
+ elog(FATAL, "smgr shutdown failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
}
PendingRelDelete *pending;
if ((fd = (*(smgrsw[which].smgr_create)) (reln)) < 0)
- elog(ERROR, "cannot create %s: %m", RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not create \"%s\": %m",
+ RelationGetRelationName(reln))));
/* Add the relation to the list of stuff to delete at abort */
pending = (PendingRelDelete *)
status = (*(smgrsw[which].smgr_extend)) (reln, blocknum, buffer);
if (status == SM_FAIL)
- elog(ERROR, "cannot extend %s: %m.\n\tCheck free disk space.",
- RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not extend \"%s\": %m",
+ RelationGetRelationName(reln)),
+ errhint("Check free disk space.")));
return status;
}
return -1;
if ((fd = (*(smgrsw[which].smgr_open)) (reln)) < 0)
if (!failOK)
- elog(ERROR, "cannot open %s: %m", RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not open \"%s\": %m",
+ RelationGetRelationName(reln))));
return fd;
}
smgrclose(int16 which, Relation reln)
{
if ((*(smgrsw[which].smgr_close)) (reln) == SM_FAIL)
- elog(ERROR, "cannot close %s: %m", RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not close \"%s\": %m",
+ RelationGetRelationName(reln))));
return SM_SUCCESS;
}
status = (*(smgrsw[which].smgr_read)) (reln, blocknum, buffer);
if (status == SM_FAIL)
- elog(ERROR, "cannot read block %d of %s: %m",
- blocknum, RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not read block %d of \"%s\": %m",
+ blocknum, RelationGetRelationName(reln))));
return status;
}
status = (*(smgrsw[which].smgr_write)) (reln, blocknum, buffer);
if (status == SM_FAIL)
- elog(ERROR, "cannot write block %d of %s: %m",
- blocknum, RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write block %d of \"%s\": %m",
+ blocknum, RelationGetRelationName(reln))));
return status;
}
status = (*(smgrsw[which].smgr_blindwrt)) (rnode, blkno, buffer);
if (status == SM_FAIL)
- elog(ERROR, "cannot write block %d of %u/%u blind: %m",
- blkno, rnode.tblNode, rnode.relNode);
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not write block %d of %u/%u blind: %m",
+ blkno, rnode.tblNode, rnode.relNode)));
return status;
}
* actually is InvalidBlockNumber.
*/
if (nblocks == InvalidBlockNumber)
- elog(ERROR, "cannot count blocks for %s: %m",
- RelationGetRelationName(reln));
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not count blocks of \"%s\": %m",
+ RelationGetRelationName(reln))));
return nblocks;
}
newblks = (*(smgrsw[which].smgr_truncate)) (reln, nblocks);
if (newblks == InvalidBlockNumber)
- elog(ERROR, "cannot truncate %s to %u blocks: %m",
- RelationGetRelationName(reln), nblocks);
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not truncate \"%s\" to %u blocks: %m",
+ RelationGetRelationName(reln), nblocks)));
}
return newblks;
* current xact.
*/
if ((*(smgrsw[pending->which].smgr_unlink)) (pending->relnode) == SM_FAIL)
- elog(WARNING, "cannot unlink %u/%u: %m",
- pending->relnode.tblNode, pending->relnode.relNode);
+ ereport(WARNING,
+ (errcode_for_file_access(),
+ errmsg("could not unlink %u/%u: %m",
+ pending->relnode.tblNode,
+ pending->relnode.relNode)));
}
pfree(pending);
}
smgriswo(int16 smgrno)
{
if (smgrno < 0 || smgrno >= NSmgr)
- elog(ERROR, "illegal storage manager number %d", smgrno);
+ elog(ERROR, "invalid storage manager id: %d", smgrno);
return smgrwo[smgrno];
}
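All the smgr.c wrappers above share one shape: index the smgrsw[] switch table by storage manager number, call through the function pointer, and turn an SM_FAIL return into an ereport() carrying errcode_for_file_access(). A condensed, illustrative sketch of that dispatch; the struct here is deliberately abbreviated, and the real f_smgr table in smgr.c has many more entry points:

/*
 * Illustrative table only; signatures are inferred from the calls above,
 * and mdread/mdwrite, mmread/mmwrite are the entry points declared in
 * storage/smgr.h.
 */
typedef struct f_smgr_sketch
{
	int			(*smgr_read) (Relation reln, BlockNumber blocknum, char *buffer);
	int			(*smgr_write) (Relation reln, BlockNumber blocknum, char *buffer);
} f_smgr_sketch;

static const f_smgr_sketch smgrsw_sketch[] = {
	{mdread, mdwrite},			/* magnetic disk */
#ifdef STABLE_MEMORY_STORAGE
	{mmread, mmwrite},			/* main memory */
#endif
};

static int
smgrread_sketch(int16 which, Relation reln, BlockNumber blocknum, char *buffer)
{
	int			status;

	/* dispatch through the selected storage manager's function pointer */
	status = (*(smgrsw_sketch[which].smgr_read)) (reln, blocknum, buffer);
	if (status == SM_FAIL)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not read block %d of \"%s\": %m",
						blocknum, RelationGetRelationName(reln))));
	return status;
}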
*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgrtype.c,v 1.19 2002/06/20 20:29:36 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/storage/smgr/smgrtype.c,v 1.20 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
if (strcmp(s, StorageManager[i].smgr_name) == 0)
PG_RETURN_INT16(i);
}
- elog(ERROR, "smgrin: unknown storage manager name '%s'", s);
+ elog(ERROR, "unrecognized storage manager name \"%s\"", s);
PG_RETURN_INT16(0);
}
char *s;
if (i >= NStorageManagers || i < 0)
- elog(ERROR, "Illegal storage manager id %d", i);
+ elog(ERROR, "invalid storage manager id: %d", i);
s = pstrdup(StorageManager[i].smgr_name);
PG_RETURN_CSTRING(s);
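smgrin() and smgrout() are fmgr-callable conversions between a storage manager name and its table index, which is why smgrinit() above reaches smgrout through DirectFunctionCall1. A small usage sketch under that assumption (smgr_name_roundtrip() is invented for illustration):

/*
 * Usage sketch only: round-trips a storage manager id through smgrout
 * and smgrin with the fmgr helpers already used in the smgr.c hunks.
 */
static void
smgr_name_roundtrip(int16 which)
{
	char	   *name;
	int16		id;

	name = DatumGetCString(DirectFunctionCall1(smgrout,
											   Int16GetDatum(which)));
	id = DatumGetInt16(DirectFunctionCall1(smgrin,
										   CStringGetDatum(name)));
	Assert(id == which);
	pfree(name);
}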
* Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $Id: elog.h,v 1.56 2003/07/24 17:52:49 tgl Exp $
+ * $Id: elog.h,v 1.57 2003/07/24 22:04:15 tgl Exp $
*
*-------------------------------------------------------------------------
*/
#define ERRCODE_T_R_INTEGRITY_CONSTRAINT_VIOLATION MAKE_SQLSTATE('4','0', '0','0','2')
#define ERRCODE_T_R_SERIALIZATION_FAILURE MAKE_SQLSTATE('4','0', '0','0','1')
#define ERRCODE_T_R_STATEMENT_COMPLETION_UNKNOWN MAKE_SQLSTATE('4','0', '0','0','3')
+#define ERRCODE_T_R_DEADLOCK_DETECTED MAKE_SQLSTATE('4','0', 'P','0','1')
/* Class 42 - Syntax Error or Access Rule Violation */
#define ERRCODE_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION MAKE_SQLSTATE('4','2', '0','0','0')
#define ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE MAKE_SQLSTATE('5','5', '0','0','0')
#define ERRCODE_OBJECT_IN_USE MAKE_SQLSTATE('5','5', '0','0','6')
#define ERRCODE_INDEXES_DEACTIVATED MAKE_SQLSTATE('5','5', 'P','0','1')
-#define ERRCODE_INDEX_CORRUPTED MAKE_SQLSTATE('5','5', 'P','0','2')
/* Class 57 - Operator Intervention (class borrowed from DB2) */
#define ERRCODE_OPERATOR_INTERVENTION MAKE_SQLSTATE('5','7', '0','0','0')
/* Class XX - Internal Error (PostgreSQL-specific error class) */
/* (this is for "can't-happen" conditions and software bugs) */
#define ERRCODE_INTERNAL_ERROR MAKE_SQLSTATE('X','X', '0','0','0')
+#define ERRCODE_DATA_CORRUPTED MAKE_SQLSTATE('X','X', '0','0','1')
+#define ERRCODE_INDEX_CORRUPTED MAKE_SQLSTATE('X','X', '0','0','2')
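The two new codes land in class XX because they describe internal damage rather than a SQL-visible condition; MAKE_SQLSTATE folds the five SQLSTATE characters into the single integer that errcode() expects. A hypothetical example of following the same pattern (ERRCODE_FOO_CORRUPTED is invented here, not part of the patch):

/*
 * Hypothetical example only: ERRCODE_FOO_CORRUPTED does not exist; it
 * just shows how a new class-XX code would be defined and reported.
 */
#define ERRCODE_FOO_CORRUPTED	MAKE_SQLSTATE('X','X', '0','0','3')

static void
report_foo_corruption(BlockNumber blkno)
{
	ereport(ERROR,
			(errcode(ERRCODE_FOO_CORRUPTED),
			 errmsg("foo map corrupted at block %u", blkno)));
}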
/* Which __func__ symbol do we have, if any? */