- *cleaned = (ItemPointerData*)palloc(sizeof(ItemPointerData)*nitem);
+ *cleaned = (ItemPointerData *) palloc(sizeof(ItemPointerData) * nitem);
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, i));
-ginVacuumPostingTreeLeaves( GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer ) {
+ginVacuumPostingTreeLeaves(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, Buffer *rootBuffer)
- if ( ginVacuumPostingTreeLeaves( gvs, PostingItemGetBlockNumber(pitem), FALSE, NULL ) )
+ if (ginVacuumPostingTreeLeaves(gvs, PostingItemGetBlockNumber(pitem), FALSE, NULL))
-ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
- Buffer lBuffer = (leftBlkno==InvalidBlockNumber) ? InvalidBuffer : ReadBuffer( gvs->index, leftBlkno );
- if ( !isParentRoot ) /* parent is already locked by LockBufferForCleanup() */
+ginDeletePage(GinVacuumState *gvs, BlockNumber deleteBlkno, BlockNumber leftBlkno,
+ Buffer lBuffer = (leftBlkno == InvalidBlockNumber) ? InvalidBuffer : ReadBuffer(gvs->index, leftBlkno);
-ginScanToDelete( GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff ) {
+ginScanToDelete(GinVacuumState *gvs, BlockNumber blkno, bool isRoot, DataPageDeleteStack *parent, OffsetNumber myoff)
- ginDeletePage( gvs, blkno, me->blkno, me->parent->blkno, myoff, me->parent->isRoot );
+ ginDeletePage(gvs, blkno, me->blkno, me->parent->blkno, myoff, me->parent->isRoot);
- if ( ginVacuumPostingTreeLeaves(gvs, rootBlkno, TRUE, &rootBuffer)==FALSE ) {
* then page is copied into temprorary one.
-ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint32 *nroot) {
+ginVacuumEntryPage(GinVacuumState *gvs, Buffer buffer, BlockNumber *roots, uint32 *nroot)
- IndexTuple itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
+ IndexTuple itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
- we can't vacuum it just now due to risk of deadlocks with scans/inserts */
- /* if we already create temrorary page, we will make changes in place */
- ItemPointerData *cleaned = (tmppage==origpage) ? NULL : GinGetPosting(itup );
- uint32 newN = ginVacuumPostingList( gvs, GinGetPosting(itup), GinGetNPosting(itup), &cleaned );
+ ItemPointerData *cleaned = (tmppage == origpage) ? NULL : GinGetPosting(itup);
+ uint32 newN = ginVacuumPostingList(gvs, GinGetPosting(itup), GinGetNPosting(itup), &cleaned);
* and copies content in to it.
- Size pos = ((char*)GinGetPosting(itup)) - ((char*)origpage);
+ Size pos = ((char *) GinGetPosting(itup)) - ((char *) origpage);
- memcpy( tmppage+pos, cleaned, sizeof(ItemPointerData)*newN );
+ memcpy(tmppage + pos, cleaned, sizeof(ItemPointerData) * newN);
itup = (IndexTuple) PageGetItem(tmppage, PageGetItemId(tmppage, i));
itup = GinFormTuple(&gvs->ginstate, value, GinGetPosting(itup), newN);
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, FirstOffsetNumber));
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.2 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistsplit.c,v 1.3 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "access/gist_private.h"
-typedef struct {
- Datum *attr;
- int len;
+typedef struct
+{
+ Datum *attr;
+ int len;
OffsetNumber *entries;
- bool *isnull;
- bool *equiv;
+ bool *isnull;
+ bool *equiv;
} GistSplitUnion;
* Forms unions of subkeys after page split, but
* uses only tuples aren't in groups of equalent tuples
*/
-static void
-gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
- GistSplitUnion *gsvp, int startkey) {
- IndexTuple *cleanedItVec;
- int i, cleanedLen=0;
+static void
+gistunionsubkeyvec(GISTSTATE *giststate, IndexTuple *itvec,
+ GistSplitUnion *gsvp, int startkey)
+{
+ IndexTuple *cleanedItVec;
+ int i,
+ cleanedLen = 0;
- cleanedItVec = (IndexTuple*)palloc(sizeof(IndexTuple) * gsvp->len);
+ cleanedItVec = (IndexTuple *) palloc(sizeof(IndexTuple) * gsvp->len);
- for(i=0;i<gsvp->len;i++) {
- if ( gsvp->equiv && gsvp->equiv[gsvp->entries[i]])
+ for (i = 0; i < gsvp->len; i++)
+ {
+ if (gsvp->equiv && gsvp->equiv[gsvp->entries[i]])
continue;
cleanedItVec[cleanedLen++] = itvec[gsvp->entries[i] - 1];
}
- gistMakeUnionItVec(giststate, cleanedItVec, cleanedLen, startkey,
- gsvp->attr, gsvp->isnull);
+ gistMakeUnionItVec(giststate, cleanedItVec, cleanedLen, startkey,
+ gsvp->attr, gsvp->isnull);
- pfree( cleanedItVec );
+ pfree(cleanedItVec);
}
/*
static void
gistunionsubkey(GISTSTATE *giststate, IndexTuple *itvec, GistSplitVector *spl, int attno)
{
- GistSplitUnion gsvp;
+ GistSplitUnion gsvp;
gsvp.equiv = spl->spl_equiv;
}
/*
- * find group in vector with equivalent value
+ * find group in vector with equivalent value
*/
static int
gistfindgroup(Relation r, GISTSTATE *giststate, GISTENTRY *valvec, GistSplitVector *spl, int attno)
{
int i;
GISTENTRY entry;
- int len=0;
+ int len = 0;
/*
- * attno key is always not null (see gistSplitByKey), so we may not check for
- * nulls
+ * attno key is always not null (see gistSplitByKey), so we may not check
+ * for nulls
*/
gistentryinit(entry, spl->splitVector.spl_rdatum, r, NULL, (OffsetNumber) 0, FALSE);
- for (i = 0; i < spl->splitVector.spl_nleft; i++) {
- float penalty = gistpenalty(giststate, attno, &entry, false,
- &valvec[spl->splitVector.spl_left[i]], false);
- if ( penalty == 0.0 ) {
+ for (i = 0; i < spl->splitVector.spl_nleft; i++)
+ {
+ float penalty = gistpenalty(giststate, attno, &entry, false,
+ &valvec[spl->splitVector.spl_left[i]], false);
+
+ if (penalty == 0.0)
+ {
spl->spl_equiv[spl->splitVector.spl_left[i]] = true;
len++;
}
}
gistentryinit(entry, spl->splitVector.spl_ldatum, r, NULL, (OffsetNumber) 0, FALSE);
- for (i = 0; i < spl->splitVector.spl_nright; i++) {
- float penalty = gistpenalty(giststate, attno, &entry, false,
- &valvec[spl->splitVector.spl_right[i]], false);
- if ( penalty == 0.0 ) {
+ for (i = 0; i < spl->splitVector.spl_nright; i++)
+ {
+ float penalty = gistpenalty(giststate, attno, &entry, false,
+ &valvec[spl->splitVector.spl_right[i]], false);
+
+ if (penalty == 0.0)
+ {
spl->spl_equiv[spl->splitVector.spl_right[i]] = true;
len++;
}
}
static void
-cleanupOffsets( OffsetNumber *a, int *len, bool *equiv, int *LenEquiv ) {
- int curlen,i;
- OffsetNumber *curwpos;
+cleanupOffsets(OffsetNumber *a, int *len, bool *equiv, int *LenEquiv)
+{
+ int curlen,
+ i;
+ OffsetNumber *curwpos;
curlen = *len;
curwpos = a;
- for (i = 0; i < *len; i++) {
- if ( equiv[ a[i] ] == FALSE ) {
+ for (i = 0; i < *len; i++)
+ {
+ if (equiv[a[i]] == FALSE)
+ {
*curwpos = a[i];
curwpos++;
- } else {
+ }
+ else
+ {
/* corner case: we shouldn't make void array */
- if ( curlen==1 ) {
- equiv[ a[i] ] = FALSE; /* mark item as non-equivalent */
- i--; /* redo the same */
+ if (curlen == 1)
+ {
+ equiv[a[i]] = FALSE; /* mark item as non-equivalent */
+ i--; /* redo the same */
*LenEquiv -= 1;
continue;
- } else
+ }
+ else
curlen--;
}
}
}
static void
-placeOne( Relation r, GISTSTATE *giststate, GistSplitVector *v, IndexTuple itup, OffsetNumber off, int attno ) {
+placeOne(Relation r, GISTSTATE *giststate, GistSplitVector *v, IndexTuple itup, OffsetNumber off, int attno)
+{
GISTENTRY identry[INDEX_MAX_KEYS];
bool isnull[INDEX_MAX_KEYS];
- bool toLeft = true;
+ bool toLeft = true;
gistDeCompressAtt(giststate, r, itup, NULL, (OffsetNumber) 0, identry, isnull);
- for(;attno<giststate->tupdesc->natts;attno++) {
- float lpenalty, rpenalty;
+ for (; attno < giststate->tupdesc->natts; attno++)
+ {
+ float lpenalty,
+ rpenalty;
GISTENTRY entry;
- gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
- lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno], identry+attno, isnull[ attno ]);
- gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
- rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno], identry+attno, isnull[ attno ]);
+ gistentryinit(entry, v->spl_lattr[attno], r, NULL, 0, FALSE);
+ lpenalty = gistpenalty(giststate, attno, &entry, v->spl_lisnull[attno], identry + attno, isnull[attno]);
+ gistentryinit(entry, v->spl_rattr[attno], r, NULL, 0, FALSE);
+ rpenalty = gistpenalty(giststate, attno, &entry, v->spl_risnull[attno], identry + attno, isnull[attno]);
- if ( lpenalty != rpenalty ) {
- if ( lpenalty > rpenalty )
+ if (lpenalty != rpenalty)
+ {
+ if (lpenalty > rpenalty)
toLeft = false;
break;
}
}
- if ( toLeft )
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = off;
+ if (toLeft)
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = off;
else
- v->splitVector.spl_right[ v->splitVector.spl_nright++ ] = off;
+ v->splitVector.spl_right[v->splitVector.spl_nright++] = off;
}
#define SWAPVAR( s, d, t ) \
} while(0)
/*
- * adjust left and right unions according to splits by previous
- * split by firsts columns. This function is called only in case
+ * adjust left and right unions according to splits by previous
+ * split by firsts columns. This function is called only in case
* when pickSplit doesn't support subspplit.
*/
static void
-supportSecondarySplit( Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC *sv, Datum oldL, Datum oldR ) {
- bool leaveOnLeft = true, tmpBool;
- GISTENTRY entryL, entryR, entrySL, entrySR;
-
- gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
- gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
- gistentryinit(entrySL, sv->spl_ldatum , r, NULL, 0, FALSE);
- gistentryinit(entrySR, sv->spl_rdatum , r, NULL, 0, FALSE);
-
- if ( sv->spl_ldatum_exists && sv->spl_rdatum_exists ) {
- float penalty1, penalty2;
+supportSecondarySplit(Relation r, GISTSTATE *giststate, int attno, GIST_SPLITVEC *sv, Datum oldL, Datum oldR)
+{
+ bool leaveOnLeft = true,
+ tmpBool;
+ GISTENTRY entryL,
+ entryR,
+ entrySL,
+ entrySR;
+
+ gistentryinit(entryL, oldL, r, NULL, 0, FALSE);
+ gistentryinit(entryR, oldR, r, NULL, 0, FALSE);
+ gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
+ gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
+
+ if (sv->spl_ldatum_exists && sv->spl_rdatum_exists)
+ {
+ float penalty1,
+ penalty2;
penalty1 = gistpenalty(giststate, attno, &entryL, false, &entrySL, false) +
- gistpenalty(giststate, attno, &entryR, false, &entrySR, false);
+ gistpenalty(giststate, attno, &entryR, false, &entrySR, false);
penalty2 = gistpenalty(giststate, attno, &entryL, false, &entrySR, false) +
- gistpenalty(giststate, attno, &entryR, false, &entrySL, false);
+ gistpenalty(giststate, attno, &entryR, false, &entrySL, false);
- if ( penalty1 > penalty2 )
+ if (penalty1 > penalty2)
leaveOnLeft = false;
- } else {
- GISTENTRY *entry1 = (sv->spl_ldatum_exists) ? &entryL : &entryR;
- float penalty1, penalty2;
+ }
+ else
+ {
+ GISTENTRY *entry1 = (sv->spl_ldatum_exists) ? &entryL : &entryR;
+ float penalty1,
+ penalty2;
/*
- * there is only one previously defined union,
- * so we just choose swap or not by lowest penalty
+ * there is only one previously defined union, so we just choose swap
+ * or not by lowest penalty
*/
penalty1 = gistpenalty(giststate, attno, entry1, false, &entrySL, false);
penalty2 = gistpenalty(giststate, attno, entry1, false, &entrySR, false);
- if ( penalty1 < penalty2 )
- leaveOnLeft = ( sv->spl_ldatum_exists ) ? true : false;
+ if (penalty1 < penalty2)
+ leaveOnLeft = (sv->spl_ldatum_exists) ? true : false;
else
- leaveOnLeft = ( sv->spl_rdatum_exists ) ? true : false;
+ leaveOnLeft = (sv->spl_rdatum_exists) ? true : false;
}
- if ( leaveOnLeft == false ) {
+ if (leaveOnLeft == false)
+ {
/*
- * swap left and right
+ * swap left and right
*/
- OffsetNumber *off, noff;
- Datum datum;
-
- SWAPVAR( sv->spl_left, sv->spl_right, off );
- SWAPVAR( sv->spl_nleft, sv->spl_nright, noff );
- SWAPVAR( sv->spl_ldatum, sv->spl_rdatum, datum );
- gistentryinit(entrySL, sv->spl_ldatum , r, NULL, 0, FALSE);
- gistentryinit(entrySR, sv->spl_rdatum , r, NULL, 0, FALSE);
+ OffsetNumber *off,
+ noff;
+ Datum datum;
+
+ SWAPVAR(sv->spl_left, sv->spl_right, off);
+ SWAPVAR(sv->spl_nleft, sv->spl_nright, noff);
+ SWAPVAR(sv->spl_ldatum, sv->spl_rdatum, datum);
+ gistentryinit(entrySL, sv->spl_ldatum, r, NULL, 0, FALSE);
+ gistentryinit(entrySR, sv->spl_rdatum, r, NULL, 0, FALSE);
}
- if ( sv->spl_ldatum_exists )
+ if (sv->spl_ldatum_exists)
gistMakeUnionKey(giststate, attno, &entryL, false, &entrySL, false,
- &sv->spl_ldatum, &tmpBool);
+ &sv->spl_ldatum, &tmpBool);
- if ( sv->spl_rdatum_exists )
+ if (sv->spl_rdatum_exists)
gistMakeUnionKey(giststate, attno, &entryR, false, &entrySR, false,
- &sv->spl_rdatum, &tmpBool);
+ &sv->spl_rdatum, &tmpBool);
sv->spl_ldatum_exists = sv->spl_rdatum_exists = false;
}
* get better split.
* Returns TRUE and v->spl_equiv = NULL if left and right unions of attno columns are the same,
* so caller may find better split
- * Returns TRUE and v->spl_equiv != NULL if there is tuples which may be freely moved
+ * Returns TRUE and v->spl_equiv != NULL if there is tuples which may be freely moved
*/
static bool
gistUserPicksplit(Relation r, GistEntryVector *entryvec, int attno, GistSplitVector *v,
IndexTuple *itup, int len, GISTSTATE *giststate)
{
GIST_SPLITVEC *sv = &v->splitVector;
+
/*
* now let the user-defined picksplit function set up the split vector; in
* entryvec have no null value!!
*/
- sv->spl_ldatum_exists = ( v->spl_lisnull[ attno ] ) ? false : true;
- sv->spl_rdatum_exists = ( v->spl_risnull[ attno ] ) ? false : true;
+ sv->spl_ldatum_exists = (v->spl_lisnull[attno]) ? false : true;
+ sv->spl_rdatum_exists = (v->spl_risnull[attno]) ? false : true;
sv->spl_ldatum = v->spl_lattr[attno];
sv->spl_rdatum = v->spl_rattr[attno];
if (sv->spl_right[sv->spl_nright - 1] == InvalidOffsetNumber)
sv->spl_right[sv->spl_nright - 1] = (OffsetNumber) (entryvec->n - 1);
- if( sv->spl_ldatum_exists || sv->spl_rdatum_exists ) {
- elog(LOG,"PickSplit method of %d columns of index '%s' doesn't support secondary split",
- attno + 1, RelationGetRelationName(r) );
+ if (sv->spl_ldatum_exists || sv->spl_rdatum_exists)
+ {
+ elog(LOG, "PickSplit method of %d columns of index '%s' doesn't support secondary split",
+ attno + 1, RelationGetRelationName(r));
- supportSecondarySplit( r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno] );
+ supportSecondarySplit(r, giststate, attno, sv, v->spl_lattr[attno], v->spl_rattr[attno]);
}
v->spl_lattr[attno] = sv->spl_ldatum;
*/
v->spl_equiv = NULL;
- if (giststate->tupdesc->natts > 1 && attno+1 != giststate->tupdesc->natts)
+ if (giststate->tupdesc->natts > 1 && attno + 1 != giststate->tupdesc->natts)
{
- if ( gistKeyIsEQ(giststate, attno, sv->spl_ldatum, sv->spl_rdatum) ) {
+ if (gistKeyIsEQ(giststate, attno, sv->spl_ldatum, sv->spl_rdatum))
+ {
/*
- * Left and right key's unions are equial, so
- * we can get better split by following columns. Note,
- * unions for attno columns are already done.
+ * Left and right key's unions are equial, so we can get better
+ * split by following columns. Note, unions for attno columns are
+ * already done.
*/
return true;
- } else {
+ }
+ else
+ {
int LenEquiv;
- v->spl_equiv = (bool *) palloc0(sizeof(bool) * (entryvec->n+1));
+ v->spl_equiv = (bool *) palloc0(sizeof(bool) * (entryvec->n + 1));
LenEquiv = gistfindgroup(r, giststate, entryvec->vector, v, attno);
/*
- * if possible, we should distribute equivalent tuples
- */
- if (LenEquiv == 0 ) {
+ * if possible, we should distribute equivalent tuples
+ */
+ if (LenEquiv == 0)
+ {
gistunionsubkey(giststate, itup, v, attno + 1);
- } else {
- cleanupOffsets( sv->spl_left, &sv->spl_nleft, v->spl_equiv, &LenEquiv );
- cleanupOffsets( sv->spl_right, &sv->spl_nright, v->spl_equiv, &LenEquiv );
+ }
+ else
+ {
+ cleanupOffsets(sv->spl_left, &sv->spl_nleft, v->spl_equiv, &LenEquiv);
+ cleanupOffsets(sv->spl_right, &sv->spl_nright, v->spl_equiv, &LenEquiv);
gistunionsubkey(giststate, itup, v, attno + 1);
- if (LenEquiv == 1 ) {
+ if (LenEquiv == 1)
+ {
/*
- * In case with one tuple we just choose left-right
- * by penalty. It's simplify user-defined pickSplit
+ * In case with one tuple we just choose left-right by
+ * penalty. It's simplify user-defined pickSplit
*/
OffsetNumber toMove = InvalidOffsetNumber;
- for(toMove=FirstOffsetNumber;toMove<entryvec->n;toMove++)
- if ( v->spl_equiv[ toMove ] )
+ for (toMove = FirstOffsetNumber; toMove < entryvec->n; toMove++)
+ if (v->spl_equiv[toMove])
break;
- Assert( toMove < entryvec->n );
-
- placeOne( r, giststate, v, itup[ toMove-1 ], toMove, attno+1 );
- /* redo gistunionsubkey(): it will not degradate performance,
- * because it's very rarely */
+ Assert(toMove < entryvec->n);
+
+ placeOne(r, giststate, v, itup[toMove - 1], toMove, attno + 1);
+
+ /*
+ * redo gistunionsubkey(): it will not degradate
+ * performance, because it's very rarely
+ */
v->spl_equiv = NULL;
gistunionsubkey(giststate, itup, v, attno + 1);
return false;
- } else if ( LenEquiv > 1 )
+ }
+ else if (LenEquiv > 1)
return true;
}
}
}
/*
- * simple split page
+ * simple split page
*/
static void
-gistSplitHalf(GIST_SPLITVEC *v, int len) {
- int i;
+gistSplitHalf(GIST_SPLITVEC *v, int len)
+{
+ int i;
- v->spl_nright = v->spl_nleft = 0;
+ v->spl_nright = v->spl_nleft = 0;
v->spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
- v->spl_right= (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
- for(i = 1; i <= len; i++)
- if ( i<len/2 )
- v->spl_right[ v->spl_nright++ ] = i;
+ v->spl_right = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
+ for (i = 1; i <= len; i++)
+ if (i < len / 2)
+ v->spl_right[v->spl_nright++] = i;
else
- v->spl_left[ v->spl_nleft++ ] = i;
+ v->spl_left[v->spl_nleft++] = i;
}
/*
* if it was invalid tuple then we need special processing.
- * We move all invalid tuples on right page.
+ * We move all invalid tuples on right page.
*
- * if there is no place on left page, gistSplit will be called one more
+ * if there is no place on left page, gistSplit will be called one more
* time for left page.
*
* Normally, we never exec this code, but after crash replay it's possible
* to get 'invalid' tuples (probability is low enough)
*/
static void
-gistSplitByInvalid(GISTSTATE *giststate, GistSplitVector *v, IndexTuple *itup, int len) {
- int i;
- static OffsetNumber offInvTuples[ MaxOffsetNumber ];
- int nOffInvTuples = 0;
+gistSplitByInvalid(GISTSTATE *giststate, GistSplitVector *v, IndexTuple *itup, int len)
+{
+ int i;
+ static OffsetNumber offInvTuples[MaxOffsetNumber];
+ int nOffInvTuples = 0;
for (i = 1; i <= len; i++)
- if ( GistTupleIsInvalid(itup[i - 1]) )
- offInvTuples[ nOffInvTuples++ ] = i;
+ if (GistTupleIsInvalid(itup[i - 1]))
+ offInvTuples[nOffInvTuples++] = i;
- if ( nOffInvTuples == len ) {
+ if (nOffInvTuples == len)
+ {
/* corner case, all tuples are invalid */
- v->spl_rightvalid= v->spl_leftvalid = false;
- gistSplitHalf( &v->splitVector, len );
- } else {
- GistSplitUnion gsvp;
-
+ v->spl_rightvalid = v->spl_leftvalid = false;
+ gistSplitHalf(&v->splitVector, len);
+ }
+ else
+ {
+ GistSplitUnion gsvp;
+
v->splitVector.spl_right = offInvTuples;
v->splitVector.spl_nright = nOffInvTuples;
v->spl_rightvalid = false;
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;
- for(i = 1; i <= len; i++)
- if ( !GistTupleIsInvalid(itup[i - 1]) )
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = i;
+ for (i = 1; i <= len; i++)
+ if (!GistTupleIsInvalid(itup[i - 1]))
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = i;
v->spl_leftvalid = true;
-
+
gsvp.equiv = NULL;
gsvp.attr = v->spl_lattr;
gsvp.len = v->splitVector.spl_nleft;
/*
* trys to split page by attno key, in a case of null
- * values move its to separate page.
+ * values move its to separate page.
*/
void
-gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate,
- GistSplitVector *v, GistEntryVector *entryvec, int attno) {
- int i;
- static OffsetNumber offNullTuples[ MaxOffsetNumber ];
- int nOffNullTuples = 0;
+gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *giststate,
+ GistSplitVector *v, GistEntryVector *entryvec, int attno)
+{
+ int i;
+ static OffsetNumber offNullTuples[MaxOffsetNumber];
+ int nOffNullTuples = 0;
- for (i = 1; i <= len; i++) {
- Datum datum;
- bool IsNull;
+ for (i = 1; i <= len; i++)
+ {
+ Datum datum;
+ bool IsNull;
- if (!GistPageIsLeaf(page) && GistTupleIsInvalid(itup[i - 1])) {
+ if (!GistPageIsLeaf(page) && GistTupleIsInvalid(itup[i - 1]))
+ {
gistSplitByInvalid(giststate, v, itup, len);
return;
}
- datum = index_getattr(itup[i - 1], attno+1, giststate->tupdesc, &IsNull);
+ datum = index_getattr(itup[i - 1], attno + 1, giststate->tupdesc, &IsNull);
gistdentryinit(giststate, attno, &(entryvec->vector[i]),
datum, r, page, i,
FALSE, IsNull);
- if ( IsNull )
- offNullTuples[ nOffNullTuples++ ] = i;
+ if (IsNull)
+ offNullTuples[nOffNullTuples++] = i;
}
v->spl_leftvalid = v->spl_rightvalid = true;
- if ( nOffNullTuples == len ) {
- /*
+ if (nOffNullTuples == len)
+ {
+ /*
* Corner case: All keys in attno column are null, we should try to
- * split by keys in next column. It all keys in all columns
- * are NULL just split page half by half
+ * split by keys in next column. It all keys in all columns are NULL
+ * just split page half by half
*/
v->spl_risnull[attno] = v->spl_lisnull[attno] = TRUE;
- if ( attno+1 == r->rd_att->natts )
- gistSplitHalf( &v->splitVector, len );
- else
- gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno+1);
- } else if ( nOffNullTuples > 0 ) {
- int j=0;
-
- /*
- * We don't want to mix NULLs and not-NULLs keys
- * on one page, so move nulls to right page
+ if (attno + 1 == r->rd_att->natts)
+ gistSplitHalf(&v->splitVector, len);
+ else
+ gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
+ }
+ else if (nOffNullTuples > 0)
+ {
+ int j = 0;
+
+ /*
+ * We don't want to mix NULLs and not-NULLs keys on one page, so move
+ * nulls to right page
*/
v->splitVector.spl_right = offNullTuples;
v->splitVector.spl_nright = nOffNullTuples;
v->splitVector.spl_left = (OffsetNumber *) palloc(len * sizeof(OffsetNumber));
v->splitVector.spl_nleft = 0;
- for(i = 1; i <= len; i++)
- if ( j<v->splitVector.spl_nright && offNullTuples[j] == i )
+ for (i = 1; i <= len; i++)
+ if (j < v->splitVector.spl_nright && offNullTuples[j] == i)
j++;
else
- v->splitVector.spl_left[ v->splitVector.spl_nleft++ ] = i;
+ v->splitVector.spl_left[v->splitVector.spl_nleft++] = i;
v->spl_equiv = NULL;
gistunionsubkey(giststate, itup, v, attno);
- } else {
+ }
+ else
+ {
/*
* all keys are not-null
*/
- entryvec->n = len+1;
+ entryvec->n = len + 1;
- if ( gistUserPicksplit(r, entryvec, attno, v, itup, len, giststate) && attno+1 != r->rd_att->natts ) {
+ if (gistUserPicksplit(r, entryvec, attno, v, itup, len, giststate) && attno + 1 != r->rd_att->natts)
+ {
/*
- * Splitting on attno column is not optimized: there is a tuples which can be freely
- * left or right page, we will try to split page by
- * following columns
+ * Splitting on attno column is not optimized: there is a tuples
+ * which can be freely left or right page, we will try to split
+ * page by following columns
*/
- if ( v->spl_equiv == NULL ) {
- /* simple case: left and right keys for attno column are equial */
- gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno+1);
- } else {
+ if (v->spl_equiv == NULL)
+ {
+ /*
+ * simple case: left and right keys for attno column are
+ * equial
+ */
+ gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
+ }
+ else
+ {
/* we should clean up vector from already distributed tuples */
- IndexTuple *newitup = (IndexTuple*)palloc((len + 1) * sizeof(IndexTuple));
- OffsetNumber *map = (OffsetNumber*)palloc((len + 1) * sizeof(IndexTuple));
- int newlen = 0;
+ IndexTuple *newitup = (IndexTuple *) palloc((len + 1) * sizeof(IndexTuple));
+ OffsetNumber *map = (OffsetNumber *) palloc((len + 1) * sizeof(IndexTuple));
+ int newlen = 0;
GIST_SPLITVEC backupSplit = v->splitVector;
- for(i=0; i<len; i++)
- if ( v->spl_equiv[i+1] ) {
- map[ newlen ] = i+1;
- newitup[ newlen++ ] = itup[i];
+ for (i = 0; i < len; i++)
+ if (v->spl_equiv[i + 1])
+ {
+ map[newlen] = i + 1;
+ newitup[newlen++] = itup[i];
}
- Assert( newlen>0 );
+ Assert(newlen > 0);
- backupSplit.spl_left = (OffsetNumber*)palloc(sizeof(OffsetNumber)*len);
- memcpy( backupSplit.spl_left, v->splitVector.spl_left, sizeof(OffsetNumber)*v->splitVector.spl_nleft);
- backupSplit.spl_right = (OffsetNumber*)palloc(sizeof(OffsetNumber)*len);
- memcpy( backupSplit.spl_right, v->splitVector.spl_right, sizeof(OffsetNumber)*v->splitVector.spl_nright);
+ backupSplit.spl_left = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
+ memcpy(backupSplit.spl_left, v->splitVector.spl_left, sizeof(OffsetNumber) * v->splitVector.spl_nleft);
+ backupSplit.spl_right = (OffsetNumber *) palloc(sizeof(OffsetNumber) * len);
+ memcpy(backupSplit.spl_right, v->splitVector.spl_right, sizeof(OffsetNumber) * v->splitVector.spl_nright);
- gistSplitByKey(r, page, newitup, newlen, giststate, v, entryvec, attno+1);
+ gistSplitByKey(r, page, newitup, newlen, giststate, v, entryvec, attno + 1);
/* merge result of subsplit */
- for(i=0;i<v->splitVector.spl_nleft;i++)
- backupSplit.spl_left[ backupSplit.spl_nleft++ ] = map[ v->splitVector.spl_left[i]-1 ];
- for(i=0;i<v->splitVector.spl_nright;i++)
- backupSplit.spl_right[ backupSplit.spl_nright++ ] = map[ v->splitVector.spl_right[i]-1 ];
+ for (i = 0; i < v->splitVector.spl_nleft; i++)
+ backupSplit.spl_left[backupSplit.spl_nleft++] = map[v->splitVector.spl_left[i] - 1];
+ for (i = 0; i < v->splitVector.spl_nright; i++)
+ backupSplit.spl_right[backupSplit.spl_nright++] = map[v->splitVector.spl_right[i] - 1];
v->splitVector = backupSplit;
/* reunion left and right datums */
gistunionsubkey(giststate, itup, v, attno);
}
- }
+ }
}
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.19 2006/07/14 14:52:16 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.20 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
* static *S used for temrorary storage (saves stack and palloc() call)
*/
-static Datum attrS[INDEX_MAX_KEYS];
-static bool isnullS[INDEX_MAX_KEYS];
+static Datum attrS[INDEX_MAX_KEYS];
+static bool isnullS[INDEX_MAX_KEYS];
/*
* Write itup vector to page, has no control of free space
bool
gistnospace(Page page, IndexTuple *itvec, int len, OffsetNumber todelete, Size freespace)
{
- unsigned int size = freespace, deleted = 0;
+ unsigned int size = freespace,
+ deleted = 0;
int i;
for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);
- if ( todelete != InvalidOffsetNumber ) {
- IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, todelete));
+ if (todelete != InvalidOffsetNumber)
+ {
+ IndexTuple itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, todelete));
+
deleted = IndexTupleSize(itup) + sizeof(ItemIdData);
}
}
bool
-gistfitpage(IndexTuple *itvec, int len) {
- int i;
- Size size=0;
+gistfitpage(IndexTuple *itvec, int len)
+{
+ int i;
+ Size size = 0;
- for(i=0;i<len;i++)
+ for (i = 0; i < len; i++)
size += IndexTupleSize(itvec[i]) + sizeof(ItemIdData);
/* TODO: Consider fillfactor */
*/
IndexTupleData *
-gistfillitupvec(IndexTuple *vec, int veclen, int *memlen) {
- char *ptr, *ret;
- int i;
+gistfillitupvec(IndexTuple *vec, int veclen, int *memlen)
+{
+ char *ptr,
+ *ret;
+ int i;
+
+ *memlen = 0;
- *memlen=0;
-
for (i = 0; i < veclen; i++)
*memlen += IndexTupleSize(vec[i]);
ptr = ret = palloc(*memlen);
- for (i = 0; i < veclen; i++) {
+ for (i = 0; i < veclen; i++)
+ {
memcpy(ptr, vec[i], IndexTupleSize(vec[i]));
ptr += IndexTupleSize(vec[i]);
}
- return (IndexTupleData*)ret;
+ return (IndexTupleData *) ret;
}
/*
- * Make unions of keys in IndexTuple vector, return FALSE if itvec contains
+ * Make unions of keys in IndexTuple vector, return FALSE if itvec contains
* invalid tuple. Resulting Datums aren't compressed.
*/
-bool
-gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startkey,
- Datum *attr, bool *isnull ) {
+bool
+gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startkey,
+ Datum *attr, bool *isnull)
+{
int i;
GistEntryVector *evec;
- int attrsize;
+ int attrsize;
- evec = (GistEntryVector *) palloc( ( len + 2 ) * sizeof(GISTENTRY) + GEVHDRSZ);
+ evec = (GistEntryVector *) palloc((len + 2) * sizeof(GISTENTRY) + GEVHDRSZ);
- for (i = startkey; i < giststate->tupdesc->natts; i++) {
- int j;
+ for (i = startkey; i < giststate->tupdesc->natts; i++)
+ {
+ int j;
evec->n = 0;
- if ( !isnull[i] ) {
- gistentryinit( evec->vector[evec->n], attr[i],
- NULL, NULL, (OffsetNumber) 0,
- FALSE);
+ if (!isnull[i])
+ {
+ gistentryinit(evec->vector[evec->n], attr[i],
+ NULL, NULL, (OffsetNumber) 0,
+ FALSE);
evec->n++;
}
- for (j = 0; j < len; j++) {
- Datum datum;
- bool IsNull;
+ for (j = 0; j < len; j++)
+ {
+ Datum datum;
+ bool IsNull;
- if (GistTupleIsInvalid(itvec[j]))
- return FALSE; /* signals that union with invalid tuple => result is invalid */
+ if (GistTupleIsInvalid(itvec[j]))
+ return FALSE; /* signals that union with invalid tuple =>
+ * result is invalid */
datum = index_getattr(itvec[j], i + 1, giststate->tupdesc, &IsNull);
if (IsNull)
}
/* If this tuple vector was all NULLs, the union is NULL */
- if ( evec->n == 0 ) {
+ if (evec->n == 0)
+ {
attr[i] = (Datum) 0;
isnull[i] = TRUE;
- } else {
- if (evec->n == 1) {
+ }
+ else
+ {
+ if (evec->n == 1)
+ {
evec->n = 2;
evec->vector[1] = evec->vector[0];
- }
+ }
/* Make union and store in attr array */
attr[i] = FunctionCall2(&giststate->unionFn[i],
- PointerGetDatum(evec),
- PointerGetDatum(&attrsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&attrsize));
isnull[i] = FALSE;
}
{
memset(isnullS, TRUE, sizeof(bool) * giststate->tupdesc->natts);
- if ( !gistMakeUnionItVec(giststate, itvec, len, 0, attrS, isnullS ) )
- return gist_form_invalid_tuple(InvalidBlockNumber);
+ if (!gistMakeUnionItVec(giststate, itvec, len, 0, attrS, isnullS))
+ return gist_form_invalid_tuple(InvalidBlockNumber);
- return gistFormTuple(giststate, r, attrS, isnullS, false);
+ return gistFormTuple(giststate, r, attrS, isnullS, false);
}
-/*
+/*
* makes union of two key
*/
void
-gistMakeUnionKey( GISTSTATE *giststate, int attno,
- GISTENTRY *entry1, bool isnull1,
- GISTENTRY *entry2, bool isnull2,
- Datum *dst, bool *dstisnull ) {
+gistMakeUnionKey(GISTSTATE *giststate, int attno,
+ GISTENTRY *entry1, bool isnull1,
+ GISTENTRY *entry2, bool isnull2,
+ Datum *dst, bool *dstisnull)
+{
- int dstsize;
+ int dstsize;
- static char storage[ 2 * sizeof(GISTENTRY) + GEVHDRSZ ];
- GistEntryVector *evec = (GistEntryVector*)storage;
+ static char storage[2 * sizeof(GISTENTRY) + GEVHDRSZ];
+ GistEntryVector *evec = (GistEntryVector *) storage;
evec->n = 2;
- if ( isnull1 && isnull2 ) {
+ if (isnull1 && isnull2)
+ {
*dstisnull = TRUE;
- *dst = (Datum)0;
- } else {
- if ( isnull1 == FALSE && isnull2 == FALSE ) {
+ *dst = (Datum) 0;
+ }
+ else
+ {
+ if (isnull1 == FALSE && isnull2 == FALSE)
+ {
evec->vector[0] = *entry1;
evec->vector[1] = *entry2;
- } else if ( isnull1 == FALSE ) {
+ }
+ else if (isnull1 == FALSE)
+ {
evec->vector[0] = *entry1;
evec->vector[1] = *entry1;
- } else {
+ }
+ else
+ {
evec->vector[0] = *entry2;
evec->vector[1] = *entry2;
}
*dstisnull = FALSE;
*dst = FunctionCall2(&giststate->unionFn[attno],
- PointerGetDatum(evec),
- PointerGetDatum(&dstsize));
+ PointerGetDatum(evec),
+ PointerGetDatum(&dstsize));
}
}
bool
-gistKeyIsEQ(GISTSTATE *giststate, int attno, Datum a, Datum b) {
- bool result;
+gistKeyIsEQ(GISTSTATE *giststate, int attno, Datum a, Datum b)
+{
+ bool result;
FunctionCall3(&giststate->equalFn[attno],
- a, b,
- PointerGetDatum(&result));
+ a, b,
+ PointerGetDatum(&result));
return result;
}
gistDeCompressAtt(giststate, r, addtup, NULL,
(OffsetNumber) 0, addentries, addisnull);
- for(i = 0; i < r->rd_att->natts; i++) {
- gistMakeUnionKey( giststate, i,
- oldentries + i, oldisnull[i],
- addentries + i, addisnull[i],
- attrS + i, isnullS + i );
+ for (i = 0; i < r->rd_att->natts; i++)
+ {
+ gistMakeUnionKey(giststate, i,
+ oldentries + i, oldisnull[i],
+ addentries + i, addisnull[i],
+ attrS + i, isnullS + i);
- if ( neednew )
+ if (neednew)
/* we already need new key, so we can skip check */
continue;
- if ( isnullS[i] )
+ if (isnullS[i])
/* union of key may be NULL if and only if both keys are NULL */
continue;
- if ( !addisnull[i] ) {
- if ( oldisnull[i] || gistKeyIsEQ(giststate, i, oldentries[i].key, attrS[i])==false )
+ if (!addisnull[i])
+ {
+ if (oldisnull[i] || gistKeyIsEQ(giststate, i, oldentries[i].key, attrS[i]) == false)
neednew = true;
}
}
it, NULL, (OffsetNumber) 0,
identry, isnull);
- Assert( maxoff >= FirstOffsetNumber );
- Assert( !GistPageIsLeaf(p) );
+ Assert(maxoff >= FirstOffsetNumber);
+ Assert(!GistPageIsLeaf(p));
for (i = FirstOffsetNumber; i <= maxoff && sum_grow; i = OffsetNumberNext(i))
{
{
gistcentryinit(giststate, i, ¢ry[i], attdata[i],
r, NULL, (OffsetNumber) 0,
- newValues,
+ newValues,
FALSE);
compatt[i] = centry[i].key;
}
GISTENTRY *orig, bool isNullOrig,
GISTENTRY *add, bool isNullAdd)
{
- float penalty = 0.0;
+ float penalty = 0.0;
- if ( giststate->penaltyFn[attno].fn_strict==FALSE || ( isNullOrig == FALSE && isNullAdd == FALSE ) )
+ if (giststate->penaltyFn[attno].fn_strict == FALSE || (isNullOrig == FALSE && isNullAdd == FALSE))
FunctionCall3(&giststate->penaltyFn[attno],
PointerGetDatum(orig),
PointerGetDatum(add),
PointerGetDatum(&penalty));
- else if ( isNullOrig && isNullAdd )
+ else if (isNullOrig && isNullAdd)
penalty = 0.0;
else
- penalty = 1e10; /* try to prevent to mix null and non-null value */
-
+ penalty = 1e10; /* try to prevent to mix null and non-null
+ * value */
+
return penalty;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.27 2006/09/21 20:31:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.28 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} ArrayTuple;
/*
- * Make union of keys on page
+ * Make union of keys on page
*/
static IndexTuple
-PageMakeUnionKey(GistVacuum *gv, Buffer buffer) {
- Page page = BufferGetPage( buffer );
+PageMakeUnionKey(GistVacuum *gv, Buffer buffer)
+{
+ Page page = BufferGetPage(buffer);
IndexTuple *vec,
- tmp, res;
+ tmp,
+ res;
int veclen = 0;
MemoryContext oldCtx = MemoryContextSwitchTo(gv->opCtx);
vec = gistextractpage(page, &veclen);
- /* we call gistunion() in temprorary context because user-defined functions called in gistunion()
- may do not free all memory */
+
+ /*
+ * we call gistunion() in temprorary context because user-defined
+ * functions called in gistunion() may do not free all memory
+ */
tmp = gistunion(gv->index, vec, veclen, &(gv->giststate));
MemoryContextSwitchTo(oldCtx);
}
static void
-gistDeleteSubtree( GistVacuum *gv, BlockNumber blkno ) {
- Buffer buffer;
- Page page;
+gistDeleteSubtree(GistVacuum *gv, BlockNumber blkno)
+{
+ Buffer buffer;
+ Page page;
buffer = ReadBuffer(gv->index, blkno);
LockBuffer(buffer, GIST_EXCLUSIVE);
page = (Page) BufferGetPage(buffer);
- if ( !GistPageIsLeaf(page) ) {
- int i;
+ if (!GistPageIsLeaf(page))
+ {
+ int i;
+
+ for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i = OffsetNumberNext(i))
+ {
+ ItemId iid = PageGetItemId(page, i);
+ IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- for (i = FirstOffsetNumber; i <= PageGetMaxOffsetNumber(page); i = OffsetNumberNext(i)) {
- ItemId iid = PageGetItemId(page, i);
- IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
- gistDeleteSubtree(gv, ItemPointerGetBlockNumber(&(idxtuple->t_tid)));
+ gistDeleteSubtree(gv, ItemPointerGetBlockNumber(&(idxtuple->t_tid)));
}
}
{
XLogRecData rdata[2];
XLogRecPtr recptr;
- gistxlogPageDelete xlrec;
+ gistxlogPageDelete xlrec;
xlrec.node = gv->index->rd_node;
xlrec.blkno = blkno;
}
else
PageSetLSN(page, XLogRecPtrForTemp);
-
+
END_CRIT_SECTION();
UnlockReleaseBuffer(buffer);
}
-static Page
-GistPageGetCopyPage( Page page ) {
- Size pageSize = PageGetPageSize( page );
- Page tmppage;
+static Page
+GistPageGetCopyPage(Page page)
+{
+ Size pageSize = PageGetPageSize(page);
+ Page tmppage;
- tmppage=(Page)palloc( pageSize );
- memcpy( tmppage, page, pageSize );
+ tmppage = (Page) palloc(pageSize);
+ memcpy(tmppage, page, pageSize);
return tmppage;
}
static ArrayTuple
-vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon, int curlenaddon) {
+vacuumSplitPage(GistVacuum *gv, Page tempPage, Buffer buffer, IndexTuple *addon, int curlenaddon)
+{
ArrayTuple res = {NULL, 0, false};
IndexTuple *vec;
SplitedPageLayout *dist = NULL,
- *ptr;
- int i, veclen=0;
- BlockNumber blkno = BufferGetBlockNumber(buffer);
+ *ptr;
+ int i,
+ veclen = 0;
+ BlockNumber blkno = BufferGetBlockNumber(buffer);
MemoryContext oldCtx = MemoryContextSwitchTo(gv->opCtx);
vec = gistextractpage(tempPage, &veclen);
MemoryContextSwitchTo(oldCtx);
- if (blkno != GIST_ROOT_BLKNO) {
+ if (blkno != GIST_ROOT_BLKNO)
+ {
/* if non-root split then we should not allocate new buffer */
dist->buffer = buffer;
dist->page = tempPage;
/* during vacuum we never split leaf page */
GistPageGetOpaque(dist->page)->flags = 0;
- } else
+ }
+ else
pfree(tempPage);
res.itup = (IndexTuple *) palloc(sizeof(IndexTuple) * veclen);
res.ituplen = 0;
/* make new pages and fills them */
- for (ptr = dist; ptr; ptr = ptr->next) {
- char *data;
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
+ char *data;
- if ( ptr->buffer == InvalidBuffer ) {
- ptr->buffer = gistNewBuffer( gv->index );
- GISTInitBuffer( ptr->buffer, 0 );
+ if (ptr->buffer == InvalidBuffer)
+ {
+ ptr->buffer = gistNewBuffer(gv->index);
+ GISTInitBuffer(ptr->buffer, 0);
ptr->page = BufferGetPage(ptr->buffer);
}
- ptr->block.blkno = BufferGetBlockNumber( ptr->buffer );
+ ptr->block.blkno = BufferGetBlockNumber(ptr->buffer);
- data = (char*)(ptr->list);
- for(i=0;i<ptr->block.num;i++) {
- if ( PageAddItem(ptr->page, (Item)data, IndexTupleSize((IndexTuple)data), i+FirstOffsetNumber, LP_USED) == InvalidOffsetNumber )
+ data = (char *) (ptr->list);
+ for (i = 0; i < ptr->block.num; i++)
+ {
+ if (PageAddItem(ptr->page, (Item) data, IndexTupleSize((IndexTuple) data), i + FirstOffsetNumber, LP_USED) == InvalidOffsetNumber)
elog(ERROR, "failed to add item to index page in \"%s\"", RelationGetRelationName(gv->index));
- data += IndexTupleSize((IndexTuple)data);
+ data += IndexTupleSize((IndexTuple) data);
}
ItemPointerSetBlockNumber(&(ptr->itup->t_tid), ptr->block.blkno);
- res.itup[ res.ituplen ] = (IndexTuple)palloc(IndexTupleSize(ptr->itup));
- memcpy( res.itup[ res.ituplen ], ptr->itup, IndexTupleSize(ptr->itup) );
+ res.itup[res.ituplen] = (IndexTuple) palloc(IndexTupleSize(ptr->itup));
+ memcpy(res.itup[res.ituplen], ptr->itup, IndexTupleSize(ptr->itup));
res.ituplen++;
}
START_CRIT_SECTION();
- for (ptr = dist; ptr; ptr = ptr->next) {
+ for (ptr = dist; ptr; ptr = ptr->next)
+ {
MarkBufferDirty(ptr->buffer);
GistPageGetOpaque(ptr->page)->rightlink = InvalidBlockNumber;
}
/* restore splitted non-root page */
- if (blkno != GIST_ROOT_BLKNO) {
- PageRestoreTempPage( dist->page, BufferGetPage( dist->buffer ) );
- dist->page = BufferGetPage( dist->buffer );
+ if (blkno != GIST_ROOT_BLKNO)
+ {
+ PageRestoreTempPage(dist->page, BufferGetPage(dist->buffer));
+ dist->page = BufferGetPage(dist->buffer);
}
if (!gv->index->rd_istemp)
{
XLogRecPtr recptr;
XLogRecData *rdata;
- ItemPointerData key; /* set key for incomplete
- * insert */
+ ItemPointerData key; /* set key for incomplete insert */
char *xlinfo;
ItemPointerSet(&key, blkno, TUPLE_IS_VALID);
rdata = formSplitRdata(gv->index->rd_node, blkno,
- false, &key, dist);
+ false, &key, dist);
xlinfo = rdata->data;
recptr = XLogInsert(RM_GIST_ID, XLOG_GIST_PAGE_SPLIT, rdata);
{
/* we must keep the buffer pin on the head page */
if (BufferGetBlockNumber(ptr->buffer) != blkno)
- UnlockReleaseBuffer( ptr->buffer );
+ UnlockReleaseBuffer(ptr->buffer);
}
if (blkno == GIST_ROOT_BLKNO)
{
- ItemPointerData key; /* set key for incomplete
- * insert */
+ ItemPointerData key; /* set key for incomplete insert */
ItemPointerSet(&key, blkno, TUPLE_IS_VALID);
{
ArrayTuple res = {NULL, 0, false};
Buffer buffer;
- Page page, tempPage = NULL;
+ Page page,
+ tempPage = NULL;
OffsetNumber i,
maxoff;
ItemId iid;
*addon = NULL;
bool needwrite = false;
OffsetNumber offToDelete[MaxOffsetNumber];
- BlockNumber blkToDelete[MaxOffsetNumber];
+ BlockNumber blkToDelete[MaxOffsetNumber];
ItemPointerData *completed = NULL;
int ncompleted = 0,
lencompleted = 16;
if (chldtuple.ituplen || chldtuple.emptypage)
{
/* update tuple or/and inserts new */
- if ( chldtuple.emptypage )
+ if (chldtuple.emptypage)
blkToDelete[nBlkToDelete++] = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
offToDelete[nOffToDelete++] = i;
PageIndexTupleDelete(tempPage, i);
if (chldtuple.ituplen)
{
- Assert( chldtuple.emptypage == false );
+ Assert(chldtuple.emptypage == false);
while (curlenaddon + chldtuple.ituplen >= lenaddon)
{
lenaddon *= 2;
}
}
}
-
- Assert( maxoff == PageGetMaxOffsetNumber(tempPage) );
+
+ Assert(maxoff == PageGetMaxOffsetNumber(tempPage));
if (curlenaddon)
{
/* insert updated tuples */
- if (gistnospace(tempPage, addon, curlenaddon, InvalidOffsetNumber, 0)) {
+ if (gistnospace(tempPage, addon, curlenaddon, InvalidOffsetNumber, 0))
+ {
/* there is no space on page to insert tuples */
res = vacuumSplitPage(gv, tempPage, buffer, addon, curlenaddon);
- tempPage=NULL; /* vacuumSplitPage() free tempPage */
- needwrite = needunion = false; /* gistSplit already forms unions and writes pages */
- } else
+ tempPage = NULL; /* vacuumSplitPage() free tempPage */
+ needwrite = needunion = false; /* gistSplit already forms
+ * unions and writes pages */
+ }
+ else
/* enough free space */
gistfillbuffer(gv->index, tempPage, addon, curlenaddon, InvalidOffsetNumber);
}
}
- /*
- * If page is empty, we should remove pointer to it before
- * deleting page (except root)
+ /*
+ * If page is empty, we should remove pointer to it before deleting page
+ * (except root)
*/
- if ( blkno != GIST_ROOT_BLKNO && ( PageIsEmpty(page) || (tempPage && PageIsEmpty(tempPage)) ) ) {
+ if (blkno != GIST_ROOT_BLKNO && (PageIsEmpty(page) || (tempPage && PageIsEmpty(tempPage))))
+ {
/*
- * New version of page is empty, so leave it unchanged,
- * upper call will mark our page as deleted.
- * In case of page split we never will be here...
+ * New version of page is empty, so leave it unchanged, upper call
+ * will mark our page as deleted. In case of page split we never will
+ * be here...
*
- * If page was empty it can't become non-empty during processing
+ * If page was empty it can't become non-empty during processing
*/
res.emptypage = true;
UnlockReleaseBuffer(buffer);
- } else {
+ }
+ else
+ {
/* write page and remove its childs if it need */
START_CRIT_SECTION();
- if ( tempPage && needwrite ) {
+ if (tempPage && needwrite)
+ {
PageRestoreTempPage(tempPage, page);
tempPage = NULL;
}
- /* Empty index */
- if (PageIsEmpty(page) && blkno == GIST_ROOT_BLKNO )
+ /* Empty index */
+ if (PageIsEmpty(page) && blkno == GIST_ROOT_BLKNO)
{
needwrite = true;
GistPageSetLeaf(page);
}
-
+
if (needwrite)
{
MarkBufferDirty(buffer);
END_CRIT_SECTION();
- if ( needunion && !PageIsEmpty(page) )
+ if (needunion && !PageIsEmpty(page))
{
res.itup = (IndexTuple *) palloc(sizeof(IndexTuple));
res.ituplen = 1;
UnlockReleaseBuffer(buffer);
/* delete empty children, now we havn't any links to pointed subtrees */
- for(i=0;i<nBlkToDelete;i++)
+ for (i = 0; i < nBlkToDelete; i++)
gistDeleteSubtree(gv, blkToDelete[i]);
if (ncompleted && !gv->index->rd_istemp)
/* use heap's tuple count */
Assert(info->num_heap_tuples >= 0);
stats->std.num_index_tuples = info->num_heap_tuples;
+
/*
- * XXX the above is wrong if index is partial. Would it be OK to
- * just return NULL, or is there work we must do below?
+ * XXX the above is wrong if index is partial. Would it be OK to just
+ * return NULL, or is there work we must do below?
*/
}
RelationGetRelationName(rel))));
/*
- * If vacuum full, we already have exclusive lock on the index.
- * Otherwise, need lock unless it's local to this backend.
+ * If vacuum full, we already have exclusive lock on the index. Otherwise,
+ * need lock unless it's local to this backend.
*/
if (info->vacuum_full)
needLock = false;
if (callback(&(idxtuple->t_tid), callback_state))
{
- todelete[ntodelete] = i-ntodelete;
+ todelete[ntodelete] = i - ntodelete;
ntodelete++;
stats->std.tuples_removed += 1;
}
MarkBufferDirty(buffer);
- for(i=0;i<ntodelete;i++)
+ for (i = 0; i < ntodelete; i++)
PageIndexTupleDelete(page, todelete[i]);
GistMarkTuplesDeleted(page);
gistxlogPageUpdate *xlinfo;
rdata = formUpdateRdata(rel->rd_node, buffer,
- todelete, ntodelete,
+ todelete, ntodelete,
NULL, 0,
NULL);
xlinfo = (gistxlogPageUpdate *) rdata->next->data;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistxlog.c,v 1.23 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistxlog.c,v 1.24 2006/10/04 00:29:48 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
static MemoryContext opCtx; /* working memory for operations */
-static MemoryContext insertCtx; /* holds incomplete_inserts list */
+static MemoryContext insertCtx; /* holds incomplete_inserts list */
static List *incomplete_inserts;
-#define ItemPointerEQ(a, b) \
+#define ItemPointerEQ(a, b) \
( ItemPointerGetOffsetNumber(a) == ItemPointerGetOffsetNumber(b) && \
ItemPointerGetBlockNumber (a) == ItemPointerGetBlockNumber(b) )
MemoryContext oldCxt;
gistIncompleteInsert *ninsert;
- if ( !ItemPointerIsValid(&key) )
- /*
+ if (!ItemPointerIsValid(&key))
+
+ /*
* if key is null then we should not store insertion as incomplete,
* because it's a vacuum operation..
*/
/*
* Stick the new incomplete insert onto the front of the list, not the
- * back. This is so that gist_xlog_cleanup will process incompletions
- * in last-in-first-out order.
+ * back. This is so that gist_xlog_cleanup will process incompletions in
+ * last-in-first-out order.
*/
incomplete_inserts = lcons(ninsert, incomplete_inserts);
{
ListCell *l;
- if ( !ItemPointerIsValid(&key) )
+ if (!ItemPointerIsValid(&key))
return;
- if (incomplete_inserts==NIL)
+ if (incomplete_inserts == NIL)
return;
foreach(l, incomplete_inserts)
if (GistPageIsLeaf(page) && xlrec.len == 0 && xlrec.data->ntodelete == 0)
GistClearTuplesDeleted(page);
- if ( !GistPageIsLeaf(page) && PageGetMaxOffsetNumber(page) == InvalidOffsetNumber && xldata->blkno == GIST_ROOT_BLKNO )
- /* all links on non-leaf root page was deleted by vacuum full,
- so root page becomes a leaf */
+ if (!GistPageIsLeaf(page) && PageGetMaxOffsetNumber(page) == InvalidOffsetNumber && xldata->blkno == GIST_ROOT_BLKNO)
+
+ /*
+ * all links on non-leaf root page was deleted by vacuum full, so root
+ * page becomes a leaf
+ */
GistPageSetLeaf(page);
GistPageGetOpaque(page)->rightlink = InvalidBlockNumber;
out_target(StringInfo buf, RelFileNode node, ItemPointerData key)
{
appendStringInfo(buf, "rel %u/%u/%u",
- node.spcNode, node.dbNode, node.relNode);
- if ( ItemPointerIsValid( &key ) )
+ node.spcNode, node.dbNode, node.relNode);
+ if (ItemPointerIsValid(&key))
appendStringInfo(buf, "; tid %u/%u",
- ItemPointerGetBlockNumber(&key),
- ItemPointerGetOffsetNumber(&key));
+ ItemPointerGetBlockNumber(&key),
+ ItemPointerGetOffsetNumber(&key));
}
static void
out_gistxlogPageDelete(StringInfo buf, gistxlogPageDelete *xlrec)
{
appendStringInfo(buf, "page_delete: rel %u/%u/%u; blkno %u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
- xlrec->blkno);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
+ xlrec->blkno);
}
static void
appendStringInfo(buf, "page_split: ");
out_target(buf, xlrec->node, xlrec->key);
appendStringInfo(buf, "; block number %u splits to %d pages",
- xlrec->origblkno, xlrec->npage);
+ xlrec->origblkno, xlrec->npage);
}
void
break;
case XLOG_GIST_CREATE_INDEX:
appendStringInfo(buf, "create_index: rel %u/%u/%u",
- ((RelFileNode *) rec)->spcNode,
- ((RelFileNode *) rec)->dbNode,
- ((RelFileNode *) rec)->relNode);
+ ((RelFileNode *) rec)->spcNode,
+ ((RelFileNode *) rec)->dbNode,
+ ((RelFileNode *) rec)->relNode);
break;
case XLOG_GIST_INSERT_COMPLETE:
appendStringInfo(buf, "complete_insert: rel %u/%u/%u",
- ((gistxlogInsertComplete *) rec)->node.spcNode,
- ((gistxlogInsertComplete *) rec)->node.dbNode,
- ((gistxlogInsertComplete *) rec)->node.relNode);
+ ((gistxlogInsertComplete *) rec)->node.spcNode,
+ ((gistxlogInsertComplete *) rec)->node.dbNode,
+ ((gistxlogInsertComplete *) rec)->node.relNode);
break;
default:
appendStringInfo(buf, "unknown gist op code %u", info);
elog(ERROR, "lost parent for block %u", insert->origblkno);
}
-static SplitedPageLayout*
-gistMakePageLayout(Buffer *buffers, int nbuffers) {
- SplitedPageLayout *res=NULL, *resptr;
+static SplitedPageLayout *
+gistMakePageLayout(Buffer *buffers, int nbuffers)
+{
+ SplitedPageLayout *res = NULL,
+ *resptr;
- while( nbuffers-- > 0 ) {
- Page page = BufferGetPage( buffers[ nbuffers ] );
- IndexTuple* vec;
- int veclen;
+ while (nbuffers-- > 0)
+ {
+ Page page = BufferGetPage(buffers[nbuffers]);
+ IndexTuple *vec;
+ int veclen;
- resptr = (SplitedPageLayout*)palloc0( sizeof(SplitedPageLayout) );
+ resptr = (SplitedPageLayout *) palloc0(sizeof(SplitedPageLayout));
- resptr->block.blkno = BufferGetBlockNumber( buffers[ nbuffers ] );
- resptr->block.num = PageGetMaxOffsetNumber( page );
+ resptr->block.blkno = BufferGetBlockNumber(buffers[nbuffers]);
+ resptr->block.num = PageGetMaxOffsetNumber(page);
- vec = gistextractpage( page, &veclen );
- resptr->list = gistfillitupvec( vec, veclen, &(resptr->lenlist) );
+ vec = gistextractpage(page, &veclen);
+ resptr->list = gistfillitupvec(vec, veclen, &(resptr->lenlist));
resptr->next = res;
res = resptr;
* Note that we assume the index is now in a valid state, except for the
* unfinished insertion. In particular it's safe to invoke gistFindPath();
* there shouldn't be any garbage pages for it to run into.
- *
+ *
* To complete insert we can't use basic insertion algorithm because
* during insertion we can't call user-defined support functions of opclass.
* So, we insert 'invalid' tuples without real key and do it by separate algorithm.
itup[i] = gist_form_invalid_tuple(insert->blkno[i]);
/*
- * any insertion of itup[] should make LOG message about
+ * any insertion of itup[] should make LOG message about
*/
if (insert->origblkno == GIST_ROOT_BLKNO)
Buffer *buffers;
Page *pages;
int numbuffer;
- OffsetNumber *todelete;
+ OffsetNumber *todelete;
/* construct path */
gistxlogFindPath(index, insert);
int j,
k,
pituplen = 0;
- XLogRecData *rdata;
- XLogRecPtr recptr;
- Buffer tempbuffer = InvalidBuffer;
- int ntodelete = 0;
+ XLogRecData *rdata;
+ XLogRecPtr recptr;
+ Buffer tempbuffer = InvalidBuffer;
+ int ntodelete = 0;
numbuffer = 1;
buffers[0] = ReadBuffer(index, insert->path[i]);
LockBuffer(buffers[0], GIST_EXCLUSIVE);
+
/*
* we check buffer, because we restored page earlier
*/
gistcheckpage(index, buffers[0]);
pages[0] = BufferGetPage(buffers[0]);
- Assert( !GistPageIsLeaf(pages[0]) );
+ Assert(!GistPageIsLeaf(pages[0]));
pituplen = PageGetMaxOffsetNumber(pages[0]);
}
}
- if ( ntodelete == 0 )
- elog(PANIC,"gistContinueInsert: can't find pointer to page(s)");
+ if (ntodelete == 0)
+ elog(PANIC, "gistContinueInsert: can't find pointer to page(s)");
/*
- * we check space with subtraction only first tuple to delete, hope,
- * that wiil be enough space....
+ * we check space with subtraction only first tuple to delete,
+ * hope, that wiil be enough space....
*/
if (gistnospace(pages[0], itup, lenitup, *todelete, 0))
if (BufferGetBlockNumber(buffers[0]) == GIST_ROOT_BLKNO)
{
- Buffer tmp;
+ Buffer tmp;
/*
* we split root, just copy content from root to new page
/* fill new page, root will be changed later */
tempbuffer = ReadBuffer(index, P_NEW);
LockBuffer(tempbuffer, GIST_EXCLUSIVE);
- memcpy( BufferGetPage(tempbuffer), pages[0], BufferGetPageSize(tempbuffer) );
+ memcpy(BufferGetPage(tempbuffer), pages[0], BufferGetPageSize(tempbuffer));
/* swap buffers[0] (was root) and temp buffer */
tmp = buffers[0];
buffers[0] = tempbuffer;
- tempbuffer = tmp; /* now in tempbuffer GIST_ROOT_BLKNO, it is still unchanged */
+ tempbuffer = tmp; /* now in tempbuffer GIST_ROOT_BLKNO,
+ * it is still unchanged */
pages[0] = BufferGetPage(buffers[0]);
}
START_CRIT_SECTION();
- for(j=0;j<ntodelete;j++)
+ for (j = 0; j < ntodelete; j++)
PageIndexTupleDelete(pages[0], todelete[j]);
rdata = formSplitRdata(index->rd_node, insert->path[i],
- false, &(insert->key),
- gistMakePageLayout( buffers, numbuffer ) );
+ false, &(insert->key),
+ gistMakePageLayout(buffers, numbuffer));
- } else {
+ }
+ else
+ {
START_CRIT_SECTION();
- for(j=0;j<ntodelete;j++)
+ for (j = 0; j < ntodelete; j++)
PageIndexTupleDelete(pages[0], todelete[j]);
gistfillbuffer(index, pages[0], itup, lenitup, InvalidOffsetNumber);
- rdata = formUpdateRdata(index->rd_node, buffers[0],
- todelete, ntodelete,
- itup, lenitup, &(insert->key));
+ rdata = formUpdateRdata(index->rd_node, buffers[0],
+ todelete, ntodelete,
+ itup, lenitup, &(insert->key));
}
- /*
- * use insert->key as mark for completion of insert (form*Rdata() above)
- * for following possible replays
+ /*
+ * use insert->key as mark for completion of insert (form*Rdata()
+ * above) for following possible replays
*/
/* write pages, we should mark it dirty befor XLogInsert() */
- for (j = 0; j < numbuffer; j++) {
+ for (j = 0; j < numbuffer; j++)
+ {
GistPageGetOpaque(pages[j])->rightlink = InvalidBlockNumber;
MarkBufferDirty(buffers[j]);
}
END_CRIT_SECTION();
lenitup = numbuffer;
- for (j = 0; j < numbuffer; j++) {
+ for (j = 0; j < numbuffer; j++)
+ {
itup[j] = gist_form_invalid_tuple(BufferGetBlockNumber(buffers[j]));
UnlockReleaseBuffer(buffers[j]);
}
- if ( tempbuffer != InvalidBuffer ) {
+ if (tempbuffer != InvalidBuffer)
+ {
/*
* it was a root split, so fill it by new values
*/
}
ereport(LOG,
- (errmsg("index %u/%u/%u needs VACUUM FULL or REINDEX to finish crash recovery",
+ (errmsg("index %u/%u/%u needs VACUUM FULL or REINDEX to finish crash recovery",
insert->node.spcNode, insert->node.dbNode, insert->node.relNode),
- errdetail("Incomplete insertion detected during crash replay.")));
+ errdetail("Incomplete insertion detected during crash replay.")));
}
void
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.47 2006/03/05 15:58:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashfunc.c,v 1.48 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* These functions are stored in pg_amproc. For each operator class
Datum result;
/*
- * Note: this is currently identical in behavior to hashvarlena, but
- * keep it as a separate function in case we someday want to do something
- * different in non-C locales. (See also hashbpchar, if so.)
+ * Note: this is currently identical in behavior to hashvarlena, but keep
+ * it as a separate function in case we someday want to do something
+ * different in non-C locales. (See also hashbpchar, if so.)
*/
result = hash_any((unsigned char *) VARDATA(key),
VARSIZE(key) - VARHDRSZ);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.59 2006/07/03 22:45:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.60 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
/*
* Determine the target fill factor (in tuples per bucket) for this index.
* The idea is to make the fill factor correspond to pages about as full
- * as the user-settable fillfactor parameter says. We can compute it
+ * as the user-settable fillfactor parameter says. We can compute it
* exactly if the index datatype is fixed-width, but for var-width there's
* some guessing involved.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.219 2006/08/18 16:09:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.220 2006/10/04 00:29:48 momjian Exp $
*
*
* INTERFACE ROUTINES
snapshot = scan->rs_snapshot;
/*
- * We must hold share lock on the buffer content while examining
- * tuple visibility. Afterwards, however, the tuples we have found
- * to be visible are guaranteed good as long as we hold the buffer pin.
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
tuple->t_data = NULL;
return;
}
- page = 0; /* first page */
+ page = 0; /* first page */
heapgetpage(scan, page);
lineoff = FirstOffsetNumber; /* first offnum */
scan->rs_inited = true;
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
- lineoff = /* next offnum */
+ page = scan->rs_cblock; /* current page */
+ lineoff = /* next offnum */
OffsetNumberNext(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
}
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE);
if (!scan->rs_inited)
{
- lineoff = lines; /* final offnum */
+ lineoff = lines; /* final offnum */
scan->rs_inited = true;
}
else
{
- lineoff = /* previous offnum */
+ lineoff = /* previous offnum */
OffsetNumberPrev(ItemPointerGetOffsetNumber(&(tuple->t_self)));
}
/* page and lineoff now reference the physically previous tid */
tuple->t_data = NULL;
return;
}
- page = 0; /* first page */
+ page = 0; /* first page */
heapgetpage(scan, page);
lineindex = 0;
scan->rs_inited = true;
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
lineindex = scan->rs_cindex + 1;
}
else
{
/* continue from previously returned page/tuple */
- page = scan->rs_cblock; /* current page */
+ page = scan->rs_cblock; /* current page */
}
dp = (Page) BufferGetPage(scan->rs_cbuf);
LockRelationOid(relationId, lockmode);
/*
- * Now that we have the lock, probe to see if the relation really
- * exists or not.
+ * Now that we have the lock, probe to see if the relation really exists
+ * or not.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relationId),
if (!ConditionalLockRelationOid(relationId, lockmode))
{
/* try to throw error by name; relation could be deleted... */
- char *relname = get_rel_name(relationId);
+ char *relname = get_rel_name(relationId);
if (relname)
ereport(ERROR,
else
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
- errmsg("could not obtain lock on relation with OID %u",
- relationId)));
+ errmsg("could not obtain lock on relation with OID %u",
+ relationId)));
}
}
/*
* Check for shared-cache-inval messages before trying to open the
- * relation. This is needed to cover the case where the name identifies
- * a rel that has been dropped and recreated since the start of our
+ * relation. This is needed to cover the case where the name identifies a
+ * rel that has been dropped and recreated since the start of our
* transaction: if we don't flush the old syscache entry then we'll latch
* onto that entry and suffer an error when we do RelationIdGetRelation.
* Note that relation_open does not need to do this, since a relation's
* heap_inplace_update - update a tuple "in place" (ie, overwrite it)
*
* Overwriting violates both MVCC and transactional safety, so the uses
- * of this function in Postgres are extremely limited. Nonetheless we
+ * of this function in Postgres are extremely limited. Nonetheless we
* find some places to use it.
*
* The tuple cannot change size, and therefore it's reasonable to assume
if (!ItemPointerIsValid(&scan->rs_mctid))
{
scan->rs_ctup.t_data = NULL;
+
/*
* unpin scan buffers
*/
else
{
/*
- * If we reached end of scan, rs_inited will now be false. We must
+ * If we reached end of scan, rs_inited will now be false. We must
* reset it to true to keep heapgettup from doing the wrong thing.
*/
scan->rs_inited = true;
scan->rs_cindex = scan->rs_mindex;
heapgettup_pagemode(scan,
NoMovementScanDirection,
- 0, /* needn't recheck scan keys */
+ 0, /* needn't recheck scan keys */
NULL);
}
else
heapgettup(scan,
NoMovementScanDirection,
- 0, /* needn't recheck scan keys */
+ 0, /* needn't recheck scan keys */
NULL);
}
}
}
/*
- * Perform XLogInsert for a heap-update operation. Caller must already
+ * Perform XLogInsert for a heap-update operation. Caller must already
* have modified the buffer(s) and marked them dirty.
*/
static XLogRecPtr
if (record->xl_info & XLOG_HEAP_INIT_PAGE)
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- true);
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ true);
Assert(BufferIsValid(buffer));
page = (Page) BufferGetPage(buffer);
else
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
- false);
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ false);
if (!BufferIsValid(buffer))
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
/* Set forward chain link in t_ctid */
htup->t_ctid = xlrec->newtid;
}
+
/*
* this test is ugly, but necessary to avoid thinking that insert change
* is already applied
return;
page = (Page) BufferGetPage(buffer);
- if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
+ if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
out_target(StringInfo buf, xl_heaptid *target)
{
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
void
appendStringInfo(buf, "update: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_MOVE)
{
appendStringInfo(buf, "move: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; new %u/%u",
- ItemPointerGetBlockNumber(&(xlrec->newtid)),
- ItemPointerGetOffsetNumber(&(xlrec->newtid)));
+ ItemPointerGetBlockNumber(&(xlrec->newtid)),
+ ItemPointerGetOffsetNumber(&(xlrec->newtid)));
}
else if (info == XLOG_HEAP_CLEAN)
{
xl_heap_clean *xlrec = (xl_heap_clean *) rec;
appendStringInfo(buf, "clean: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->block);
}
else if (info == XLOG_HEAP_NEWPAGE)
{
xl_heap_newpage *xlrec = (xl_heap_newpage *) rec;
appendStringInfo(buf, "newpage: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->blkno);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->blkno);
}
else if (info == XLOG_HEAP_LOCK)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.64 2006/09/10 23:33:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.65 2006/10/04 00:29:48 momjian Exp $
*
*
* INTERFACE ROUTINES
VARATT_SIZEP(result) |= VARATT_FLAG_COMPRESSED;
if (length == 0)
- return result; /* Can save a lot of work at this point! */
+ return result; /* Can save a lot of work at this point! */
startchunk = sliceoffset / TOAST_MAX_CHUNK_SIZE;
endchunk = (sliceoffset + length - 1) / TOAST_MAX_CHUNK_SIZE;
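A quick worked example of the chunk arithmetic above; the chunk size here is a made-up round number for illustration, not the real TOAST_MAX_CHUNK_SIZE:

#include <stdio.h>

int
main(void)
{
    const int chunk_size = 2000;             /* hypothetical value */
    int sliceoffset = 3500;
    int length = 1000;
    int startchunk = sliceoffset / chunk_size;                /* 1 */
    int endchunk = (sliceoffset + length - 1) / chunk_size;   /* 2 */

    printf("slice spans chunks %d through %d\n", startchunk, endchunk);
    return 0;
}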
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.58 2006/07/31 20:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.59 2006/10/04 00:29:48 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
else
scan->keyData = NULL;
- scan->is_multiscan = false; /* caller may change this */
+ scan->is_multiscan = false; /* caller may change this */
scan->kill_prior_tuple = false;
scan->ignore_killed_tuples = true; /* default setting */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.94 2006/07/31 20:08:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/index/indexam.c,v 1.95 2006/10/04 00:29:48 momjian Exp $
*
* INTERFACE ROUTINES
* index_open - open an index relation by relation OID
* index_open - open an index relation by relation OID
*
* If lockmode is not "NoLock", the specified kind of lock is
- * obtained on the index. (Generally, NoLock should only be
+ * obtained on the index. (Generally, NoLock should only be
* used if the caller knows it has some appropriate lock on the
* index already.)
*
* index_getnext on this scan; index_getnext_indexitem will not use the
* heapRelation link (nor the snapshot). However, the caller had better
* be holding some kind of lock on the heap relation in any case, to ensure
- * no one deletes it (or the index) out from under us. Caller must also
+ * no one deletes it (or the index) out from under us. Caller must also
* be holding a lock on the index.
*/
IndexScanDesc
*
* callback routine tells whether a given main-heap tuple is
* to be deleted
- *
+ *
* return value is an optional palloc'd struct of statistics
* ----------------
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.143 2006/08/25 04:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.144 2006/10/04 00:29:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
htup.t_self = itup->t_tid;
if (heap_fetch(heapRel, SnapshotSelf, &htup, &hbuffer,
- false, NULL))
+ false, NULL))
{
/* Normal case --- it's still live */
ReleaseBuffer(hbuffer);
* + updates the metapage if a true root or fast root is split.
*
* On entry, we must have the right buffer in which to do the
- * insertion, and the buffer must be pinned and write-locked. On return,
+ * insertion, and the buffer must be pinned and write-locked. On return,
* we will have dropped both the pin and the lock on the buffer.
*
* If 'afteritem' is >0 then the new tuple must be inserted after the
if (!rel->rd_istemp)
{
xl_btree_insert xlrec;
- BlockNumber xldownlink;
+ BlockNumber xldownlink;
xl_btree_metadata xlmeta;
uint8 xlinfo;
XLogRecPtr recptr;
sopaque = (BTPageOpaque) PageGetSpecialPointer(spage);
if (sopaque->btpo_prev != ropaque->btpo_prev)
elog(PANIC, "right sibling's left-link doesn't match");
+
/*
* Check to see if we can set the SPLIT_END flag in the right-hand
* split page; this can save some I/O for vacuum since it need not
* proceed to the right sibling. We can set the flag if the right
- * sibling has a different cycleid: that means it could not be part
- * of a group of pages that were all split off from the same ancestor
+ * sibling has a different cycleid: that means it could not be part of
+ * a group of pages that were all split off from the same ancestor
* page. If you're confused, imagine that page A splits to A B and
* then again, yielding A C B, while vacuum is in progress. Tuples
* originally in A could now be in either B or C, hence vacuum must
- * examine both pages. But if D, our right sibling, has a different
+ * examine both pages. But if D, our right sibling, has a different
* cycleid then it could not contain any tuples that were in A when
* the vacuum started.
*/
*
* NO EREPORT(ERROR) till right sibling is updated. We can get away with
* not starting the critical section till here because we haven't been
- * scribbling on the original page yet, and we don't care about the
- * new sibling until it's linked into the btree.
+ * scribbling on the original page yet, and we don't care about the new
+ * sibling until it's linked into the btree.
*/
START_CRIT_SECTION();
* Direct access to page is not good but faster - we should implement
* some new func in page API. Note we only store the tuples
* themselves, knowing that the item pointers are in the same order
- * and can be reconstructed by scanning the tuples. See comments
- * for _bt_restore_page().
+ * and can be reconstructed by scanning the tuples. See comments for
+ * _bt_restore_page().
*/
xlrec.leftlen = ((PageHeader) leftpage)->pd_special -
((PageHeader) leftpage)->pd_upper;
static void
_bt_vacuum_one_page(Relation rel, Buffer buffer)
{
- OffsetNumber deletable[MaxOffsetNumber];
- int ndeletable = 0;
- OffsetNumber offnum,
- minoff,
- maxoff;
- Page page = BufferGetPage(buffer);
- BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
+ OffsetNumber deletable[MaxOffsetNumber];
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ minoff,
+ maxoff;
+ Page page = BufferGetPage(buffer);
+ BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * Scan over all items to see which ones need deleted
- * according to LP_DELETE flags.
+ * Scan over all items to see which ones need deleted according to
+ * LP_DELETE flags.
*/
minoff = P_FIRSTDATAKEY(opaque);
maxoff = PageGetMaxOffsetNumber(page);
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemId = PageGetItemId(page, offnum);
+ ItemId itemId = PageGetItemId(page, offnum);
if (ItemIdDeleted(itemId))
deletable[ndeletable++] = offnum;
if (ndeletable > 0)
_bt_delitems(rel, buffer, deletable, ndeletable);
+
/*
* Note: if we didn't find any LP_DELETE items, then the page's
- * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother
- * expending a separate write to clear it, however. We will clear
- * it when we split the page.
+ * BTP_HAS_GARBAGE hint bit is falsely set. We do not bother expending a
+ * separate write to clear it, however. We will clear it when we split
+ * the page.
*/
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.99 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.100 2006/10/04 00:29:49 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
/*
* Since the cache might be stale, we check the page more carefully
- * here than normal. We *must* check that it's not deleted.
- * If it's not alone on its level, then we reject too --- this
- * may be overly paranoid but better safe than sorry. Note we
- * don't check P_ISROOT, because that's not set in a "fast root".
+ * here than normal. We *must* check that it's not deleted. If it's
+ * not alone on its level, then we reject too --- this may be overly
+ * paranoid but better safe than sorry. Note we don't check P_ISROOT,
+ * because that's not set in a "fast root".
*/
if (!P_IGNORE(rootopaque) &&
rootopaque->btpo.level == rootlevel &&
PageIndexMultiDelete(page, itemnos, nitems);
/*
- * We can clear the vacuum cycle ID since this page has certainly
- * been processed by the current vacuum scan.
+ * We can clear the vacuum cycle ID since this page has certainly been
+ * processed by the current vacuum scan.
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_cycleid = 0;
/*
* Mark the page as not containing any LP_DELETE items. This is not
- * certainly true (there might be some that have recently been marked,
- * but weren't included in our target-item list), but it will almost
- * always be true and it doesn't seem worth an additional page scan
- * to check it. Remember that BTP_HAS_GARBAGE is only a hint anyway.
+ * certainly true (there might be some that have recently been marked, but
+ * weren't included in our target-item list), but it will almost always be
+ * true and it doesn't seem worth an additional page scan to check it.
+ * Remember that BTP_HAS_GARBAGE is only a hint anyway.
*/
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.151 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.152 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
BlockNumber *freePages;
int nFreePages; /* number of entries in freePages[] */
int maxFreePages; /* allocated size of freePages[] */
- BlockNumber totFreePages; /* true total # of free pages */
+ BlockNumber totFreePages; /* true total # of free pages */
MemoryContext pagedelcontext;
} BTVacState;
IndexBulkDeleteCallback callback, void *callback_state,
BTCycleId cycleid);
static void btvacuumpage(BTVacState *vstate, BlockNumber blkno,
- BlockNumber orig_blkno);
+ BlockNumber orig_blkno);
/*
buildstate.spool = _bt_spoolinit(index, indexInfo->ii_Unique, false);
/*
- * If building a unique index, put dead tuples in a second spool to
- * keep them out of the uniqueness check.
+ * If building a unique index, put dead tuples in a second spool to keep
+ * them out of the uniqueness check.
*/
if (indexInfo->ii_Unique)
buildstate.spool2 = _bt_spoolinit(index, false, true);
#endif /* BTREE_BUILD_STATS */
/*
- * If we are reindexing a pre-existing index, it is critical to send out
- * a relcache invalidation SI message to ensure all backends re-read the
- * index metapage. We expect that the caller will ensure that happens
- * (typically as a side effect of updating index stats, but it must
- * happen even if the stats don't change!)
+ * If we are reindexing a pre-existing index, it is critical to send out a
+ * relcache invalidation SI message to ensure all backends re-read the
+ * index metapage. We expect that the caller will ensure that happens
+ * (typically as a side effect of updating index stats, but it must happen
+ * even if the stats don't change!)
*/
/*
if (scan->kill_prior_tuple)
{
/*
- * Yes, remember it for later. (We'll deal with all such tuples
+ * Yes, remember it for later. (We'll deal with all such tuples
* at once right before leaving the index page.) The test for
* numKilled overrun is not just paranoia: if the caller reverses
* direction in the indexscan then the same item might get entered
- * multiple times. It's not worth trying to optimize that, so we
+ * multiple times. It's not worth trying to optimize that, so we
* don't detect it, but instead just forget any excess entries.
*/
if (so->killedItems == NULL)
while (ntids < max_tids)
{
/*
- * Advance to next tuple within page. This is the same as the
- * easy case in _bt_next().
+ * Advance to next tuple within page. This is the same as the easy
+ * case in _bt_next().
*/
if (++so->currPos.itemIndex > so->currPos.lastItem)
{
so->keyData = (ScanKey) palloc(scan->numberOfKeys * sizeof(ScanKeyData));
else
so->keyData = NULL;
- so->killedItems = NULL; /* until needed */
+ so->killedItems = NULL; /* until needed */
so->numKilled = 0;
scan->opaque = so;
}
/*
* Just record the current itemIndex. If we later step to next page
- * before releasing the marked position, _bt_steppage makes a full copy
- * of the currPos struct in markPos. If (as often happens) the mark is
- * moved before we leave the page, we don't have to do that work.
+ * before releasing the marked position, _bt_steppage makes a full copy of
+ * the currPos struct in markPos. If (as often happens) the mark is moved
+ * before we leave the page, we don't have to do that work.
*/
if (BTScanPosIsValid(so->currPos))
so->markItemIndex = so->currPos.itemIndex;
if (so->markItemIndex >= 0)
{
/*
- * The mark position is on the same page we are currently on.
- * Just restore the itemIndex.
+ * The mark position is on the same page we are currently on. Just
+ * restore the itemIndex.
*/
so->currPos.itemIndex = so->markItemIndex;
- }
+ }
else
{
/* we aren't holding any read locks, but gotta drop the pin */
btbulkdelete(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
- IndexBulkDeleteResult * volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
+ IndexBulkDeleteResult *volatile stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
void *callback_state = (void *) PG_GETARG_POINTER(3);
Relation rel = info->index;
IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/*
- * If btbulkdelete was called, we need not do anything, just return
- * the stats from the latest btbulkdelete call. If it wasn't called,
- * we must still do a pass over the index, to recycle any newly-recyclable
- * pages and to obtain index statistics.
+ * If btbulkdelete was called, we need not do anything, just return the
+ * stats from the latest btbulkdelete call. If it wasn't called, we must
+ * still do a pass over the index, to recycle any newly-recyclable pages
+ * and to obtain index statistics.
*
* Since we aren't going to actually delete any leaf items, there's no
* need to go through all the vacuum-cycle-ID pushups.
/*
* During a non-FULL vacuum it's quite possible for us to be fooled by
* concurrent page splits into double-counting some index tuples, so
- * disbelieve any total that exceeds the underlying heap's count.
- * (We can't check this during btbulkdelete.)
+ * disbelieve any total that exceeds the underlying heap's count. (We
+ * can't check this during btbulkdelete.)
*/
if (!info->vacuum_full)
{
bool needLock;
/*
- * Reset counts that will be incremented during the scan; needed in
- * case of multiple scans during a single VACUUM command
+ * Reset counts that will be incremented during the scan; needed in case
+ * of multiple scans during a single VACUUM command
*/
stats->num_index_tuples = 0;
stats->pages_deleted = 0;
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * The outer loop iterates over all index pages except the metapage,
- * in physical order (we hope the kernel will cooperate in providing
+ * The outer loop iterates over all index pages except the metapage, in
+ * physical order (we hope the kernel will cooperate in providing
* read-ahead for speed). It is critical that we visit all leaf pages,
* including ones added after we start the scan, else we might fail to
* delete some deletable tuples. Hence, we must repeatedly check the
* relation length. We must acquire the relation-extension lock while
* doing so to avoid a race condition: if someone else is extending the
* relation, there is a window where bufmgr/smgr have created a new
- * all-zero page but it hasn't yet been write-locked by _bt_getbuf().
- * If we manage to scan such a page here, we'll improperly assume it can
- * be recycled. Taking the lock synchronizes things enough to prevent a
+ * all-zero page but it hasn't yet been write-locked by _bt_getbuf(). If
+ * we manage to scan such a page here, we'll improperly assume it can be
+ * recycled. Taking the lock synchronizes things enough to prevent a
* problem: either num_pages won't include the new page, or _bt_getbuf
* already has write lock on the buffer and it will be fully initialized
* before we can examine it. (See also vacuumlazy.c, which has the same
- * issue.) Also, we need not worry if a page is added immediately after
+ * issue.) Also, we need not worry if a page is added immediately after
* we look; the page splitting code already has write-lock on the left
- * page before it adds a right page, so we must already have processed
- * any tuples due to be moved into such a page.
+ * page before it adds a right page, so we must already have processed any
+ * tuples due to be moved into such a page.
*
* We can skip locking for new or temp relations, however, since no one
* else could be accessing them.
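A hedged sketch of the re-check loop that comment describes, not the function itself: scan_one_page() is a hypothetical stand-in for the per-page vacuum worker, while the extension-lock and length calls are the backend's own lmgr/bufmgr routines:

/* assumes: Relation rel; bool needLock; backend headers in scope */
BlockNumber blkno = BTREE_METAPAGE + 1;

for (;;)
{
    BlockNumber num_pages;

    /* re-check the relation length under the extension lock */
    if (needLock)
        LockRelationForExtension(rel, ExclusiveLock);
    num_pages = RelationGetNumberOfBlocks(rel);
    if (needLock)
        UnlockRelationForExtension(rel, ExclusiveLock);

    if (blkno >= num_pages)
        break;                      /* no new pages appeared; done */

    while (blkno < num_pages)
        scan_one_page(rel, blkno++);    /* hypothetical helper */
}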
void *callback_state = vstate->callback_state;
Relation rel = info->index;
bool delete_now;
- BlockNumber recurse_to;
+ BlockNumber recurse_to;
Buffer buf;
Page page;
BTPageOpaque opaque;
_bt_checkpage(rel, buf);
/*
- * If we are recursing, the only case we want to do anything with is
- * a live leaf page having the current vacuum cycle ID. Any other state
- * implies we already saw the page (eg, deleted it as being empty).
- * In particular, we don't want to risk adding it to freePages twice.
+ * If we are recursing, the only case we want to do anything with is a
+ * live leaf page having the current vacuum cycle ID. Any other state
+ * implies we already saw the page (eg, deleted it as being empty). In
+ * particular, we don't want to risk adding it to freePages twice.
*/
if (blkno != orig_blkno)
{
OffsetNumber deletable[MaxOffsetNumber];
int ndeletable;
OffsetNumber offnum,
- minoff,
- maxoff;
+ minoff,
+ maxoff;
/*
- * Trade in the initial read lock for a super-exclusive write
- * lock on this page. We must get such a lock on every leaf page
- * over the course of the vacuum scan, whether or not it actually
- * contains any deletable tuples --- see nbtree/README.
+ * Trade in the initial read lock for a super-exclusive write lock on
+ * this page. We must get such a lock on every leaf page over the
+ * course of the vacuum scan, whether or not it actually contains any
+ * deletable tuples --- see nbtree/README.
*/
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
LockBufferForCleanup(buf);
/*
- * Check whether we need to recurse back to earlier pages. What
- * we are concerned about is a page split that happened since we
- * started the vacuum scan. If the split moved some tuples to a
- * lower page then we might have missed 'em. If so, set up for
- * tail recursion. (Must do this before possibly clearing
- * btpo_cycleid below!)
+ * Check whether we need to recurse back to earlier pages. What we
+ * are concerned about is a page split that happened since we started
+ * the vacuum scan. If the split moved some tuples to a lower page
+ * then we might have missed 'em. If so, set up for tail recursion.
+ * (Must do this before possibly clearing btpo_cycleid below!)
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid &&
recurse_to = opaque->btpo_next;
/*
- * Scan over all items to see which ones need deleted
- * according to the callback function.
+ * Scan over all items to see which ones need deleted according to the
+ * callback function.
*/
ndeletable = 0;
minoff = P_FIRSTDATAKEY(opaque);
}
/*
- * Apply any needed deletes. We issue just one _bt_delitems()
- * call per page, so as to minimize WAL traffic.
+ * Apply any needed deletes. We issue just one _bt_delitems() call
+ * per page, so as to minimize WAL traffic.
*/
if (ndeletable > 0)
{
* have any deletions to do. (If we do, _bt_delitems takes care
* of this.) This ensures we won't process the page again.
*
- * We treat this like a hint-bit update because there's no need
- * to WAL-log it.
+ * We treat this like a hint-bit update because there's no need to
+ * WAL-log it.
*/
if (vstate->cycleid != 0 &&
opaque->btpo_cycleid == vstate->cycleid)
}
/*
- * If it's now empty, try to delete; else count the live tuples.
- * We don't delete when recursing, though, to avoid putting entries
- * into freePages out-of-order (doesn't seem worth any extra code to
- * handle the case).
+ * If it's now empty, try to delete; else count the live tuples. We
+ * don't delete when recursing, though, to avoid putting entries into
+ * freePages out-of-order (doesn't seem worth any extra code to handle
+ * the case).
*/
if (minoff > maxoff)
delete_now = (blkno == orig_blkno);
stats->pages_deleted++;
/*
- * During VACUUM FULL it's okay to recycle deleted pages
- * immediately, since there can be no other transactions scanning
- * the index. Note that we will only recycle the current page and
- * not any parent pages that _bt_pagedel might have recursed to;
- * this seems reasonable in the name of simplicity. (Trying to do
- * otherwise would mean we'd have to sort the list of recyclable
- * pages we're building.)
+ * During VACUUM FULL it's okay to recycle deleted pages immediately,
+ * since there can be no other transactions scanning the index. Note
+ * that we will only recycle the current page and not any parent pages
+ * that _bt_pagedel might have recursed to; this seems reasonable in
+ * the name of simplicity. (Trying to do otherwise would mean we'd
+ * have to sort the list of recyclable pages we're building.)
*/
if (ndel && info->vacuum_full)
{
_bt_relbuf(rel, buf);
/*
- * This is really tail recursion, but if the compiler is too stupid
- * to optimize it as such, we'd eat an uncomfortably large amount of
- * stack space per recursion level (due to the deletable[] array).
- * A failure is improbable since the number of levels isn't likely to be
- * large ... but just in case, let's hand-optimize into a loop.
+ * This is really tail recursion, but if the compiler is too stupid to
+ * optimize it as such, we'd eat an uncomfortably large amount of stack
+ * space per recursion level (due to the deletable[] array). A failure is
+ * improbable since the number of levels isn't likely to be large ... but
+ * just in case, let's hand-optimize into a loop.
*/
if (recurse_to != P_NONE)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.106 2006/08/24 01:18:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool _bt_readpage(IndexScanDesc scan, ScanDirection dir,
- OffsetNumber offnum);
+ OffsetNumber offnum);
static bool _bt_steppage(IndexScanDesc scan, ScanDirection dir);
static Buffer _bt_walk_left(Relation rel, Buffer buf);
static bool _bt_endpoint(IndexScanDesc scan, ScanDirection dir);
* _bt_first() -- Find the first item in a scan.
*
* We need to be clever about the direction of scan, the search
- * conditions, and the tree ordering. We find the first item (or,
+ * conditions, and the tree ordering. We find the first item (or,
* if backwards scan, the last item) in the tree that satisfies the
* qualifications in the scan key. On success exit, the page containing
* the current index tuple is pinned but not locked, and data about
{
ScanKey cur = startKeys[i];
- Assert(cur->sk_attno == i+1);
+ Assert(cur->sk_attno == i + 1);
if (cur->sk_flags & SK_ROW_HEADER)
{
* Row comparison header: look to the first row member instead.
*
* The member scankeys are already in insertion format (ie, they
- * have sk_func = 3-way-comparison function), but we have to
- * watch out for nulls, which _bt_preprocess_keys didn't check.
- * A null in the first row member makes the condition unmatchable,
- * just like qual_ok = false.
+ * have sk_func = 3-way-comparison function), but we have to watch
+ * out for nulls, which _bt_preprocess_keys didn't check. A null
+ * in the first row member makes the condition unmatchable, just
+ * like qual_ok = false.
*/
cur = (ScanKey) DatumGetPointer(cur->sk_argument);
Assert(cur->sk_flags & SK_ROW_MEMBER);
if (cur->sk_flags & SK_ISNULL)
return false;
memcpy(scankeys + i, cur, sizeof(ScanKeyData));
+
/*
* If the row comparison is the last positioning key we accepted,
* try to add additional keys from the lower-order row members.
*
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
- * the last item on this page. Adjust the starting offset if needed.
- * (If this results in an offset before the first item or after the last
- * one, _bt_readpage will report no items found, and then we'll step to
- * the next page as needed.)
+ * the last item on this page. Adjust the starting offset if needed. (If
+ * this results in an offset before the first item or after the last one,
+ * _bt_readpage will report no items found, and then we'll step to the
+ * next page as needed.)
*/
if (goback)
offnum = OffsetNumberPrev(offnum);
BTScanOpaque so = (BTScanOpaque) scan->opaque;
/*
- * Advance to next tuple on current page; or if there's no more,
- * try to step to the next page with data.
+ * Advance to next tuple on current page; or if there's no more, try to
+ * step to the next page with data.
*/
if (ScanDirectionIsForward(dir))
{
/*
* we must save the page's right-link while scanning it; this tells us
- * where to step right to after we're done with these items. There is
- * no corresponding need for the left-link, since splits always go right.
+ * where to step right to after we're done with these items. There is no
+ * corresponding need for the left-link, since splits always go right.
*/
so->currPos.nextPage = opaque->btpo_next;
_bt_killitems(scan, true);
/*
- * Before we modify currPos, make a copy of the page data if there
- * was a mark position that needs it.
+ * Before we modify currPos, make a copy of the page data if there was a
+ * mark position that needs it.
*/
if (so->markItemIndex >= 0)
{
so->currPos.moreRight = true;
/*
- * Walk left to the next page with data. This is much more
- * complex than the walk-right case because of the possibility
- * that the page to our left splits while we are in flight to it,
- * plus the possibility that the page we were on gets deleted
- * after we leave it. See nbtree/README for details.
+ * Walk left to the next page with data. This is much more complex
+ * than the walk-right case because of the possibility that the page
+ * to our left splits while we are in flight to it, plus the
+ * possibility that the page we were on gets deleted after we leave
+ * it. See nbtree/README for details.
*/
for (;;)
{
return false;
/*
- * Okay, we managed to move left to a non-deleted page.
- * Done if it's not half-dead and contains matching tuples.
- * Else loop back and do it all again.
+ * Okay, we managed to move left to a non-deleted page. Done if
+ * it's not half-dead and contains matching tuples. Else loop back
+ * and do it all again.
*/
page = BufferGetPage(so->currPos.buf);
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.106 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.107 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void _bt_sortaddtup(Page page, Size itemsize,
IndexTuple itup, OffsetNumber itup_off);
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state,
- IndexTuple itup);
+ IndexTuple itup);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
BTSpool *btspool, BTSpool *btspool2);
state->btps_full = (BLCKSZ * (100 - BTREE_NONLEAF_FILLFACTOR) / 100);
else
state->btps_full = RelationGetTargetPageFreeSpace(wstate->index,
- BTREE_DEFAULT_FILLFACTOR);
+ BTREE_DEFAULT_FILLFACTOR);
/* no parent level, yet */
state->btps_next = NULL;
Size itupsz;
/*
- * This is a handy place to check for cancel interrupts during the
- * btree load phase of index creation.
+ * This is a handy place to check for cancel interrupts during the btree
+ * load phase of index creation.
*/
CHECK_FOR_INTERRUPTS();
"or use full text indexing.")));
/*
- * Check to see if page is "full". It's definitely full if the item
- * won't fit. Otherwise, compare to the target freespace derived from
- * the fillfactor. However, we must put at least two items on each
- * page, so disregard fillfactor if we don't have that many.
+ * Check to see if page is "full". It's definitely full if the item won't
+ * fit. Otherwise, compare to the target freespace derived from the
+ * fillfactor. However, we must put at least two items on each page, so
+ * disregard fillfactor if we don't have that many.
*/
if (pgspc < itupsz || (pgspc < state->btps_full && last_off > P_FIRSTKEY))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.78 2006/07/25 19:13:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.79 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void _bt_mark_scankey_required(ScanKey skey);
static bool _bt_check_rowcompare(ScanKey skey,
- IndexTuple tuple, TupleDesc tupdesc,
- ScanDirection dir, bool *continuescan);
+ IndexTuple tuple, TupleDesc tupdesc,
+ ScanDirection dir, bool *continuescan);
/*
* comparison data ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(), unless comparison
- * data is first stored into the key entries. Currently this
+ * data is first stored into the key entries. Currently this
* routine is only called by nbtsort.c and tuplesort.c, which have
* their own comparison routines.
*/
/*
* Emit the cleaned-up keys into the outkeys[] array, and then
- * mark them if they are required. They are required (possibly
+ * mark them if they are required. They are required (possibly
* only in one direction) if all attrs before this one had "=".
*/
for (j = BTMaxStrategyNumber; --j >= 0;)
* Mark a scankey as "required to continue the scan".
*
* Depending on the operator type, the key may be required for both scan
- * directions or just one. Also, if the key is a row comparison header,
+ * directions or just one. Also, if the key is a row comparison header,
* we have to mark the appropriate subsidiary ScanKeys as required. In
* such cases, the first subsidiary key is required, but subsequent ones
* are required only as long as they correspond to successive index columns.
* scribbling on a data structure belonging to the index AM's caller, not on
* our private copy. This should be OK because the marking will not change
* from scan to scan within a query, and so we'd just re-mark the same way
- * anyway on a rescan. Something to keep an eye on though.
+ * anyway on a rescan. Something to keep an eye on though.
*/
static void
_bt_mark_scankey_required(ScanKey skey)
{
- int addflags;
+ int addflags;
switch (skey->sk_strategy)
{
if (skey->sk_flags & SK_ROW_HEADER)
{
- ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
- AttrNumber attno = skey->sk_attno;
+ ScanKey subkey = (ScanKey) DatumGetPointer(skey->sk_argument);
+ AttrNumber attno = skey->sk_attno;
/* First subkey should be same as the header says */
Assert(subkey->sk_attno == attno);
*continuescan = true; /* default assumption */
/*
- * If the scan specifies not to return killed tuples, then we treat
- * a killed tuple as not passing the qual. Most of the time, it's a
- * win to not bother examining the tuple's index keys, but just return
+ * If the scan specifies not to return killed tuples, then we treat a
+ * killed tuple as not passing the qual. Most of the time, it's a win to
+ * not bother examining the tuple's index keys, but just return
* immediately with continuescan = true to proceed to the next tuple.
- * However, if this is the last tuple on the page, we should check
- * the index keys to prevent uselessly advancing to the next page.
+ * However, if this is the last tuple on the page, we should check the
+ * index keys to prevent uselessly advancing to the next page.
*/
if (scan->ignore_killed_tuples && ItemIdDeleted(iid))
{
if (offnum > P_FIRSTDATAKEY(opaque))
return false;
}
+
/*
- * OK, we want to check the keys, but we'll return FALSE even
- * if the tuple passes the key tests.
+ * OK, we want to check the keys, but we'll return FALSE even if the
+ * tuple passes the key tests.
*/
tuple_valid = false;
}
{
/*
* Unlike the simple-scankey case, this isn't a disallowed case.
- * But it can never match. If all the earlier row comparison
- * columns are required for the scan direction, we can stop
- * the scan, because there can't be another tuple that will
- * succeed.
+ * But it can never match. If all the earlier row comparison
+ * columns are required for the scan direction, we can stop the
+ * scan, because there can't be another tuple that will succeed.
*/
if (subkey != (ScanKey) DatumGetPointer(skey->sk_argument))
subkey--;
*/
switch (subkey->sk_strategy)
{
- /* EQ and NE cases aren't allowed here */
+ /* EQ and NE cases aren't allowed here */
case BTLessStrategyNumber:
result = (cmpresult < 0);
break;
{
/*
* Tuple fails this qual. If it's a required qual for the current
- * scan direction, then we can conclude no further tuples will
- * pass, either. Note we have to look at the deciding column, not
+ * scan direction, then we can conclude no further tuples will pass,
+ * either. Note we have to look at the deciding column, not
* necessarily the first or last column of the row condition.
*/
if ((subkey->sk_flags & SK_BT_REQFWD) &&
* is sufficient for setting LP_DELETE hint bits.
*
* We match items by heap TID before assuming they are the right ones to
- * delete. We cope with cases where items have moved right due to insertions.
+ * delete. We cope with cases where items have moved right due to insertions.
* If an item has moved off the current page due to a split, we'll fail to
* find it and do nothing (this is not an error case --- we assume the item
* will eventually get marked in a future indexscan). Note that because we
for (i = 0; i < so->numKilled; i++)
{
- int itemIndex = so->killedItems[i];
- BTScanPosItem *kitem = &so->currPos.items[itemIndex];
- OffsetNumber offnum = kitem->indexOffset;
+ int itemIndex = so->killedItems[i];
+ BTScanPosItem *kitem = &so->currPos.items[itemIndex];
+ OffsetNumber offnum = kitem->indexOffset;
Assert(itemIndex >= so->currPos.firstItem &&
itemIndex <= so->currPos.lastItem);
}
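A hedged sketch of the per-item search that the surrounding comment and loop describe: walk right within the page until the heap TID matches, then set the LP_DELETE hint. Field and flag names follow the 8.2-era on-page item format and BTScanPosItem, and killedsomething is assumed declared earlier; treat the details as an approximation, not the exact function body:

OffsetNumber offnum = kitem->indexOffset;

while (offnum <= maxoff)
{
    ItemId      iid = PageGetItemId(page, offnum);
    IndexTuple  ituple = (IndexTuple) PageGetItem(page, iid);

    if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
    {
        /* found it: set the LP_DELETE hint; no WAL record is needed */
        iid->lp_flags |= LP_DELETE;
        killedsomething = true;
        break;
    }
    offnum = OffsetNumberNext(offnum);  /* item may have moved right */
}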
/*
- * Since this can be redone later if needed, it's treated the same
- * as a commit-hint-bit status update for heap tuples: we mark the
- * buffer dirty but don't make a WAL log entry.
+ * Since this can be redone later if needed, it's treated the same as a
+ * commit-hint-bit status update for heap tuples: we mark the buffer dirty
+ * but don't make a WAL log entry.
*
* Whenever we mark anything LP_DELETEd, we also set the page's
* BTP_HAS_GARBAGE flag, which is likewise just a hint.
LockBuffer(so->currPos.buf, BUFFER_LOCK_UNLOCK);
/*
- * Always reset the scan state, so we don't look for same items
- * on other pages.
+ * Always reset the scan state, so we don't look for same items on other
+ * pages.
*/
so->numKilled = 0;
}
/*
* The following routines manage a shared-memory area in which we track
* assignment of "vacuum cycle IDs" to currently-active btree vacuuming
- * operations. There is a single counter which increments each time we
- * start a vacuum to assign it a cycle ID. Since multiple vacuums could
+ * operations. There is a single counter which increments each time we
+ * start a vacuum to assign it a cycle ID. Since multiple vacuums could
* be active concurrently, we have to track the cycle ID for each active
* vacuum; this requires at most MaxBackends entries (usually far fewer).
* We assume at most one vacuum can be active for a given index.
LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
/* Assign the next cycle ID, being careful to avoid zero */
- do {
+ do
+ {
result = ++(btvacinfo->cycle_ctr);
} while (result == 0);
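A minimal standalone sketch of the zero-avoiding counter above: zero is reserved to mean "no active vacuum cycle" (pages are cleared back to zero once processed), so the counter skips it when the 16-bit value wraps. The typedef is a stand-in for the backend's BTCycleId:

#include <stdint.h>

typedef uint16_t cycle_id;       /* stand-in for BTCycleId */

static cycle_id
next_cycle_id(cycle_id *ctr)
{
    cycle_id result;

    do
    {
        result = (cycle_id) (*ctr + 1);   /* may wrap around to zero */
        *ctr = result;
    } while (result == 0);                /* never hand out zero */

    return result;
}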
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.37 2006/08/07 16:57:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.38 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* in correct itemno sequence, but physically the opposite order from the
* original, because we insert them in the opposite of itemno order. This
* does not matter in any current btree code, but it's something to keep an
- * eye on. Is it worth changing just on general principles?
+ * eye on. Is it worth changing just on general principles?
*/
static void
_bt_restore_page(Page page, char *from, int len)
char *datapos;
int datalen;
xl_btree_metadata md;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
datapos = (char *) xlrec + SizeOfBtreeInsert;
datalen = record->xl_len - SizeOfBtreeInsert;
if (!(record->xl_info & XLR_BKP_BLOCK_1))
{
buffer = XLogReadBuffer(reln,
- ItemPointerGetBlockNumber(&(xlrec->target.tid)),
+ ItemPointerGetBlockNumber(&(xlrec->target.tid)),
false);
if (BufferIsValid(buffer))
{
else
{
if (PageAddItem(page, (Item) datapos, datalen,
- ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
+ ItemPointerGetOffsetNumber(&(xlrec->target.tid)),
LP_USED) == InvalidOffsetNumber)
elog(PANIC, "btree_insert_redo: failed to add item");
OffsetNumber targetoff;
BlockNumber leftsib;
BlockNumber rightsib;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
Buffer buffer;
Page page;
BTPageOpaque pageop;
}
/*
- * Mark the page as not containing any LP_DELETE items --- see comments
- * in _bt_delitems().
+ * Mark the page as not containing any LP_DELETE items --- see comments in
+ * _bt_delitems().
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
opaque->btpo_flags &= ~BTP_HAS_GARBAGE;
Buffer buffer;
Page page;
BTPageOpaque pageop;
- BlockNumber downlink = 0;
+ BlockNumber downlink = 0;
reln = XLogOpenRelation(xlrec->node);
buffer = XLogReadBuffer(reln, xlrec->rootblk, true);
out_target(StringInfo buf, xl_btreetid *target)
{
appendStringInfo(buf, "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
- ItemPointerGetBlockNumber(&(target->tid)),
- ItemPointerGetOffsetNumber(&(target->tid)));
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
+ ItemPointerGetBlockNumber(&(target->tid)),
+ ItemPointerGetOffsetNumber(&(target->tid)));
}
void
appendStringInfo(buf, "split_l: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R:
appendStringInfo(buf, "split_r: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_L_ROOT:
appendStringInfo(buf, "split_l_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_SPLIT_R_ROOT:
appendStringInfo(buf, "split_r_root: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; oth %u; rgh %u",
- xlrec->otherblk, xlrec->rightblk);
+ xlrec->otherblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_DELETE:
xl_btree_delete *xlrec = (xl_btree_delete *) rec;
appendStringInfo(buf, "delete: rel %u/%u/%u; blk %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode, xlrec->block);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode, xlrec->block);
break;
}
case XLOG_BTREE_DELETE_PAGE:
appendStringInfo(buf, "delete_page: ");
out_target(buf, &(xlrec->target));
appendStringInfo(buf, "; dead %u; left %u; right %u",
- xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
+ xlrec->deadblk, xlrec->leftblk, xlrec->rightblk);
break;
}
case XLOG_BTREE_NEWROOT:
xl_btree_newroot *xlrec = (xl_btree_newroot *) rec;
appendStringInfo(buf, "newroot: rel %u/%u/%u; root %u lev %u",
- xlrec->node.spcNode, xlrec->node.dbNode,
- xlrec->node.relNode,
- xlrec->rootblk, xlrec->level);
+ xlrec->node.spcNode, xlrec->node.dbNode,
+ xlrec->node.relNode,
+ xlrec->rootblk, xlrec->level);
break;
}
default:
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.39 2006/07/13 16:49:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.40 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
clog_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == CLOG_ZEROPAGE)
{
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.20 2006/07/20 00:46:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.21 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Truncate the SLRU files. This could be done at any time, but
- * checkpoint seems a reasonable place for it. There is one exception:
- * if we are called during xlog recovery, then shared->latest_page_number
- * isn't valid (because StartupMultiXact hasn't been called yet) and
- * so SimpleLruTruncate would get confused. It seems best not to risk
+ * checkpoint seems a reasonable place for it. There is one exception: if
+ * we are called during xlog recovery, then shared->latest_page_number
+ * isn't valid (because StartupMultiXact hasn't been called yet) and so
+ * SimpleLruTruncate would get confused. It seems best not to risk
* removing any data during recovery anyway, so don't truncate.
*/
if (!InRecovery)
int i;
appendStringInfo(buf, "create multixact %u offset %u:",
- xlrec->mid, xlrec->moff);
+ xlrec->mid, xlrec->moff);
for (i = 0; i < xlrec->nxids; i++)
appendStringInfo(buf, " %u", xlrec->xids[i]);
}
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.38 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.39 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct SlruFlushData
{
- int num_files; /* # files actually open */
- int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
+ int num_files; /* # files actually open */
+ int fd[MAX_FLUSH_BUFFERS]; /* their FD's */
int segno[MAX_FLUSH_BUFFERS]; /* their log seg#s */
} SlruFlushData;
* page_lru_count entries to be "reset" to lower values than they should have,
* in case a process is delayed while it executes this macro. With care in
* SlruSelectLRUPage(), this does little harm, and in any case the absolute
- * worst possible consequence is a nonoptimal choice of page to evict. The
+ * worst possible consequence is a nonoptimal choice of page to evict. The
* gain from allowing concurrent reads of SLRU pages seems worth it.
*/
#define SlruRecentlyUsed(shared, slotno) \
/* we assume nslots isn't so large as to risk overflow */
sz = MAXALIGN(sizeof(SlruSharedData));
- sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
+ sz += MAXALIGN(nslots * sizeof(char *)); /* page_buffer[] */
sz += MAXALIGN(nslots * sizeof(SlruPageStatus)); /* page_status[] */
- sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
- sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
- sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
-
+ sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
+ sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
+ sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockId)); /* buffer_locks[] */
+
return BUFFERALIGN(sz) + BLCKSZ * nslots;
}
* Easiest way to deal with that is to accept references to
* nonexistent files here and in SlruPhysicalReadPage.)
*
- * Note: it is possible for more than one backend to be executing
- * this code simultaneously for different pages of the same file.
- * Hence, don't use O_EXCL or O_TRUNC or anything like that.
+ * Note: it is possible for more than one backend to be executing this
+ * code simultaneously for different pages of the same file. Hence,
+ * don't use O_EXCL or O_TRUNC or anything like that.
*/
SlruFileName(ctl, path, segno);
fd = BasicOpenFile(path, O_RDWR | O_CREAT | PG_BINARY,
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not seek in file \"%s\" to offset %u: %m.",
- path, offset)));
+ errdetail("Could not seek in file \"%s\" to offset %u: %m.",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not read from file \"%s\" at offset %u: %m.",
- path, offset)));
+ errdetail("Could not read from file \"%s\" at offset %u: %m.",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("Could not write to file \"%s\" at offset %u: %m.",
- path, offset)));
+ errdetail("Could not write to file \"%s\" at offset %u: %m.",
+ path, offset)));
break;
case SLRU_FSYNC_FAILED:
ereport(ERROR,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.23 2006/10/03 21:21:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.24 2006/10/04 00:29:49 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
if (errno != ENOENT || giveWarning)
ereport(WARNING,
(errcode_for_file_access(),
- errmsg("could not remove twophase state file \"%s\": %m",
- path)));
+ errmsg("could not remove twophase state file \"%s\": %m",
+ path)));
}
/*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.74 2006/09/26 17:21:39 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.75 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/*
* Use volatile pointer to prevent code rearrangement; other backends
- * could be examining my subxids info concurrently, and we don't
- * want them to see an invalid intermediate state, such as
- * incrementing nxids before filling the array entry. Note we are
- * assuming that TransactionId and int fetch/store are atomic.
+ * could be examining my subxids info concurrently, and we don't want
+ * them to see an invalid intermediate state, such as incrementing
+ * nxids before filling the array entry. Note we are assuming that
+ * TransactionId and int fetch/store are atomic.
*/
volatile PGPROC *myproc = MyProc;
myproc->xid = xid;
else
{
- int nxids = myproc->subxids.nxids;
+ int nxids = myproc->subxids.nxids;
if (nxids < PGPROC_MAX_CACHED_SUBXIDS)
{
* The place where we actually get into deep trouble is halfway around
* from the oldest existing XID. (This calculation is probably off by one
* or two counts, because the special XIDs reduce the size of the loop a
- * little bit. But we throw in plenty of slop below, so it doesn't
+ * little bit. But we throw in plenty of slop below, so it doesn't
* matter.)
*/
xidWrapLimit = oldest_datminxid + (MaxTransactionId >> 1);
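Illustrative arithmetic only, not backend code: a toy value shows where the "halfway around" wrap limit lands (the real code also adjusts for the special XIDs, which is the "off by one or two" the comment mentions):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    uint32_t max_xid = 0xFFFFFFFFu;              /* MaxTransactionId */
    uint32_t oldest_datminxid = 1000;
    uint32_t xidWrapLimit = oldest_datminxid + (max_xid >> 1);

    printf("wrap limit = %u\n", xidWrapLimit);   /* 2147484647 */
    return 0;
}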
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.226 2006/08/27 19:11:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.227 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
XactLockTableInsert(s->transactionId);
- PG_TRACE1 (transaction__start, s->transactionId);
+ PG_TRACE1(transaction__start, s->transactionId);
/*
- * set transaction_timestamp() (a/k/a now()). We want this to be the
- * same as the first command's statement_timestamp(), so don't do a
- * fresh GetCurrentTimestamp() call (which'd be expensive anyway).
+ * set transaction_timestamp() (a/k/a now()). We want this to be the same
+ * as the first command's statement_timestamp(), so don't do a fresh
+ * GetCurrentTimestamp() call (which'd be expensive anyway).
*/
xactStartTimestamp = stmtStartTimestamp;
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->xid = InvalidTransactionId;
MyProc->xmin = InvalidTransactionId;
- MyProc->inVacuum = false; /* must be cleared with xid/xmin */
+ MyProc->inVacuum = false; /* must be cleared with xid/xmin */
/* Clear the subtransaction-XID cache too while holding the lock */
MyProc->subxids.nxids = 0;
LWLockRelease(ProcArrayLock);
}
- PG_TRACE1 (transaction__commit, s->transactionId);
+ PG_TRACE1(transaction__commit, s->transactionId);
/*
* This is all post-commit cleanup. Note that if an error is raised here,
LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
MyProc->xid = InvalidTransactionId;
MyProc->xmin = InvalidTransactionId;
- MyProc->inVacuum = false; /* must be cleared with xid/xmin */
+ MyProc->inVacuum = false; /* must be cleared with xid/xmin */
/* Clear the subtransaction-XID cache too while holding the lock */
MyProc->subxids.nxids = 0;
LWLockRelease(ProcArrayLock);
}
- PG_TRACE1 (transaction__abort, s->transactionId);
+ PG_TRACE1(transaction__abort, s->transactionId);
/*
* Post-abort cleanup. See notes in CommitTransaction() concerning
int i;
appendStringInfo(buf, "%04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
if (xlrec->nrels > 0)
{
appendStringInfo(buf, "; rels:");
RelFileNode rnode = xlrec->xnodes[i];
appendStringInfo(buf, " %u/%u/%u",
- rnode.spcNode, rnode.dbNode, rnode.relNode);
+ rnode.spcNode, rnode.dbNode, rnode.relNode);
}
}
if (xlrec->nsubxacts > 0)
int i;
appendStringInfo(buf, "%04u-%02u-%02u %02u:%02u:%02u",
- tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_hour, tm->tm_min, tm->tm_sec);
+ tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_hour, tm->tm_min, tm->tm_sec);
if (xlrec->nrels > 0)
{
appendStringInfo(buf, "; rels:");
RelFileNode rnode = xlrec->xnodes[i];
appendStringInfo(buf, " %u/%u/%u",
- rnode.spcNode, rnode.dbNode, rnode.relNode);
+ rnode.spcNode, rnode.dbNode, rnode.relNode);
}
}
if (xlrec->nsubxacts > 0)
void
xact_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_XACT_COMMIT)
{
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.249 2006/08/21 16:16:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.250 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct XLogCtlWrite
{
- XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
- int curridx; /* cache index of next block to write */
- time_t lastSegSwitchTime; /* time of last xlog segment switch */
+ XLogwrtResult LogwrtResult; /* current value of LogwrtResult */
+ int curridx; /* cache index of next block to write */
+ time_t lastSegSwitchTime; /* time of last xlog segment switch */
} XLogCtlWrite;
/*
bool use_lock);
static int XLogFileOpen(uint32 log, uint32 seg);
static int XLogFileRead(uint32 log, uint32 seg, int emode);
-static void XLogFileClose(void);
+static void XLogFileClose(void);
static bool RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize);
static int PreallocXlogFiles(XLogRecPtr endptr);
static void xlog_outrec(StringInfo buf, XLogRecord *record);
#endif
static bool read_backup_label(XLogRecPtr *checkPointLoc,
- XLogRecPtr *minRecoveryLoc);
+ XLogRecPtr *minRecoveryLoc);
static void rm_redo_error_callback(void *arg);
/*
* NOTE: We disallow len == 0 because it provides a useful bit of extra
* error checking in ReadRecord. This means that all callers of
- * XLogInsert must supply at least some not-in-a-buffer data. However,
- * we make an exception for XLOG SWITCH records because we don't want
- * them to ever cross a segment boundary.
+ * XLogInsert must supply at least some not-in-a-buffer data. However, we
+ * make an exception for XLOG SWITCH records because we don't want them to
+ * ever cross a segment boundary.
*/
if (len == 0 && !isLogSwitch)
elog(PANIC, "invalid xlog record length %u", len);
* checkpoint, so it's better to be slow in this case and fast otherwise.
*
* If we aren't doing full-page writes then RedoRecPtr doesn't actually
- * affect the contents of the XLOG record, so we'll update our local
- * copy but not force a recomputation.
+ * affect the contents of the XLOG record, so we'll update our local copy
+ * but not force a recomputation.
*/
if (!XLByteEQ(RedoRecPtr, Insert->RedoRecPtr))
{
}
/*
- * Also check to see if forcePageWrites was just turned on; if we
- * weren't already doing full-page writes then go back and recompute.
- * (If it was just turned off, we could recompute the record without
- * full pages, but we choose not to bother.)
+ * Also check to see if forcePageWrites was just turned on; if we weren't
+ * already doing full-page writes then go back and recompute. (If it was
+ * just turned off, we could recompute the record without full pages, but
+ * we choose not to bother.)
*/
if (Insert->forcePageWrites && !doPageWrites)
{
INSERT_RECPTR(RecPtr, Insert, curridx);
/*
- * If the record is an XLOG_SWITCH, and we are exactly at the start
- * of a segment, we need not insert it (and don't want to because
- * we'd like consecutive switch requests to be no-ops). Instead,
- * make sure everything is written and flushed through the end of
- * the prior segment, and return the prior segment's end address.
+ * If the record is an XLOG_SWITCH, and we are exactly at the start of a
+ * segment, we need not insert it (and don't want to because we'd like
+ * consecutive switch requests to be no-ops). Instead, make sure
+ * everything is written and flushed through the end of the prior segment,
+ * and return the prior segment's end address.
*/
if (isLogSwitch &&
(RecPtr.xrecoff % XLogSegSize) == SizeOfXLogLongPHD)
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
{
- StringInfoData buf;
+ StringInfoData buf;
initStringInfo(&buf);
appendStringInfo(&buf, "INSERT @ %X/%X: ",
LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
/*
- * Flush through the end of the page containing XLOG_SWITCH,
- * and perform end-of-segment actions (eg, notifying archiver).
+ * Flush through the end of the page containing XLOG_SWITCH, and
+ * perform end-of-segment actions (eg, notifying archiver).
*/
WriteRqst = XLogCtl->xlblocks[curridx];
FlushRqst.Write = WriteRqst;
* switch.
*
* This is also the right place to notify the Archiver that the
- * segment is ready to copy to archival storage, and to update
- * the timer for archive_timeout.
+ * segment is ready to copy to archival storage, and to update the
+ * timer for archive_timeout.
*/
if (finishing_seg || (xlog_switch && last_iteration))
{
Assert(openLogFile >= 0);
/*
- * posix_fadvise is problematic on many platforms: on older x86 Linux
- * it just dumps core, and there are reports of problems on PPC platforms
- * as well. The following is therefore disabled for the time being.
- * We could consider some kind of configure test to see if it's safe to
- * use, but since we lack hard evidence that there's any useful performance
- * gain to be had, spending time on that seems unprofitable for now.
+ * posix_fadvise is problematic on many platforms: on older x86 Linux it
+ * just dumps core, and there are reports of problems on PPC platforms as
+ * well. The following is therefore disabled for the time being. We could
+ * consider some kind of configure test to see if it's safe to use, but
+ * since we lack hard evidence that there's any useful performance gain to
+ * be had, spending time on that seems unprofitable for now.
*/
#ifdef NOT_USED
/*
* WAL segment files will not be re-read in normal operation, so we advise
- * OS to release any cached pages. But do not do so if WAL archiving is
+ * OS to release any cached pages. But do not do so if WAL archiving is
* active, because archiver process could use the cache to read the WAL
* segment.
*
- * While O_DIRECT works for O_SYNC, posix_fadvise() works for fsync()
- * and O_SYNC, and some platforms only have posix_fadvise().
+ * While O_DIRECT works for O_SYNC, posix_fadvise() works for fsync() and
+ * O_SYNC, and some platforms only have posix_fadvise().
*/
#if defined(HAVE_DECL_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
if (!XLogArchivingActive())
posix_fadvise(openLogFile, 0, 0, POSIX_FADV_DONTNEED);
#endif
-
-#endif /* NOT_USED */
+#endif /* NOT_USED */
if (close(openLogFile))
ereport(PANIC,
- (errcode_for_file_access(),
- errmsg("could not close log file %u, segment %u: %m",
- openLogId, openLogSeg)));
+ (errcode_for_file_access(),
+ errmsg("could not close log file %u, segment %u: %m",
+ openLogId, openLogSeg)));
openLogFile = -1;
}
got_record:;
/*
- * xl_len == 0 is bad data for everything except XLOG SWITCH, where
- * it is required.
+ * xl_len == 0 is bad data for everything except XLOG SWITCH, where it is
+ * required.
*/
if (record->xl_rmid == RM_XLOG_ID && record->xl_info == XLOG_SWITCH)
{
EndRecPtr.xrecoff = RecPtr->xrecoff + MAXALIGN(total_len);
ReadRecPtr = *RecPtr;
memcpy(buffer, record, total_len);
+
/*
* Special processing if it's an XLOG SWITCH record
*/
EndRecPtr.xrecoff += XLogSegSize - 1;
EndRecPtr.xrecoff -= EndRecPtr.xrecoff % XLogSegSize;
nextRecord = NULL; /* definitely not on same page */
+
/*
- * Pretend that readBuf contains the last page of the segment.
- * This is just to avoid Assert failure in StartupXLOG if XLOG
- * ends with this segment.
+ * Pretend that readBuf contains the last page of the segment. This is
+ * just to avoid Assert failure in StartupXLOG if XLOG ends with this
+ * segment.
*/
readOff = XLogSegSize - XLOG_BLCKSZ;
}
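The two-step adjustment of EndRecPtr.xrecoff a few lines up is the standard round-up-to-a-multiple idiom; this small standalone sketch (with an assumed 16 MB segment size) shows the effect on an offset that lies inside a segment.

    #include <stdio.h>

    int
    main(void)
    {
        unsigned int XLogSegSize = 16 * 1024 * 1024;    /* assumed segment size */
        unsigned int xrecoff = 0x012A5000;              /* somewhere inside segment 1 */

        /* Round up to the next segment boundary (a value already on a
         * boundary would be left where it is). */
        xrecoff += XLogSegSize - 1;
        xrecoff -= xrecoff % XLogSegSize;

        printf("%08X\n", xrecoff);                      /* prints 02000000 */
        return 0;
    }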
WriteControlFile(void)
{
int fd;
- char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
+ char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
char *localeptr;
/*
if (ControlFile->xlog_blcksz != XLOG_BLCKSZ)
ereport(FATAL,
(errmsg("database files are incompatible with server"),
- errdetail("The database cluster was initialized with XLOG_BLCKSZ %d,"
- " but the server was compiled with XLOG_BLCKSZ %d.",
- ControlFile->xlog_blcksz, XLOG_BLCKSZ),
+ errdetail("The database cluster was initialized with XLOG_BLCKSZ %d,"
+ " but the server was compiled with XLOG_BLCKSZ %d.",
+ ControlFile->xlog_blcksz, XLOG_BLCKSZ),
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->xlog_seg_size != XLOG_SEG_SIZE)
ereport(FATAL,
* Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
* in additional info.)
*/
- XLogCtl->XLogCacheByte = (Size) XLOG_BLCKSZ * XLOGbuffers;
+ XLogCtl->XLogCacheByte = (Size) XLOG_BLCKSZ *XLOGbuffers;
XLogCtl->XLogCacheBlck = XLOGbuffers - 1;
XLogCtl->Insert.currpage = (XLogPageHeader) (XLogCtl->pages);
" you will have to use the last backup for recovery.")));
else if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY)
ereport(LOG,
- (errmsg("database system was interrupted while in recovery at log time %s",
- str_time(ControlFile->checkPointCopy.time)),
- errhint("If this has occurred more than once some data may be corrupted"
- " and you may need to choose an earlier recovery target.")));
+ (errmsg("database system was interrupted while in recovery at log time %s",
+ str_time(ControlFile->checkPointCopy.time)),
+ errhint("If this has occurred more than once some data may be corrupted"
+ " and you may need to choose an earlier recovery target.")));
else if (ControlFile->state == DB_IN_PRODUCTION)
ereport(LOG,
(errmsg("database system was interrupted at %s",
int rmid;
/*
- * Update pg_control to show that we are recovering and to show
- * the selected checkpoint as the place we are starting from.
- * We also mark pg_control with any minimum recovery stop point
- * obtained from a backup history file.
+ * Update pg_control to show that we are recovering and to show the
+ * selected checkpoint as the place we are starting from. We also mark
+ * pg_control with any minimum recovery stop point obtained from a
+ * backup history file.
*/
if (InArchiveRecovery)
{
UpdateControlFile();
/*
- * If there was a backup label file, it's done its job and the
- * info has now been propagated into pg_control. We must get rid of
- * the label file so that if we crash during recovery, we'll pick up
- * at the latest recovery restartpoint instead of going all the way
- * back to the backup start point. It seems prudent though to just
- * rename the file out of the way rather than delete it completely.
+ * If there was a backup label file, it's done its job and the info
+ * has now been propagated into pg_control. We must get rid of the
+ * label file so that if we crash during recovery, we'll pick up at
+ * the latest recovery restartpoint instead of going all the way back
+ * to the backup start point. It seems prudent though to just rename
+ * the file out of the way rather than delete it completely.
*/
if (haveBackupLabel)
{
{
bool recoveryContinue = true;
bool recoveryApply = true;
- ErrorContextCallback errcontext;
+ ErrorContextCallback errcontext;
InRedo = true;
ereport(LOG,
#ifdef WAL_DEBUG
if (XLOG_DEBUG)
{
- StringInfoData buf;
+ StringInfoData buf;
initStringInfo(&buf);
appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
- ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
- EndRecPtr.xlogid, EndRecPtr.xrecoff);
+ ReadRecPtr.xlogid, ReadRecPtr.xrecoff,
+ EndRecPtr.xlogid, EndRecPtr.xrecoff);
xlog_outrec(&buf, record);
appendStringInfo(&buf, " - ");
RmgrTable[record->xl_rmid].rm_desc(&buf,
record->xl_info,
- XLogRecGetData(record));
+ XLogRecGetData(record));
elog(LOG, "%s", buf.data);
pfree(buf.data);
}
void
GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch)
{
- uint32 ckptXidEpoch;
- TransactionId ckptXid;
- TransactionId nextXid;
+ uint32 ckptXidEpoch;
+ TransactionId ckptXid;
+ TransactionId nextXid;
/* Must read checkpoint info first, else have race condition */
{
CheckPointCLOG();
CheckPointSUBTRANS();
CheckPointMultiXact();
- FlushBufferPool(); /* performs all required fsyncs */
+ FlushBufferPool(); /* performs all required fsyncs */
/* We deliberately delay 2PC checkpointing as long as possible */
CheckPointTwoPhase(checkPointRedo);
}
static void
RecoveryRestartPoint(const CheckPoint *checkPoint)
{
- int elapsed_secs;
- int rmid;
+ int elapsed_secs;
+ int rmid;
/*
- * Do nothing if the elapsed time since the last restartpoint is less
- * than half of checkpoint_timeout. (We use a value less than
+ * Do nothing if the elapsed time since the last restartpoint is less than
+ * half of checkpoint_timeout. (We use a value less than
* checkpoint_timeout so that variations in the timing of checkpoints on
* the master, or speed of transmission of WAL segments to a slave, won't
* make the slave skip a restartpoint once it's synced with the master.)
CheckPointGuts(checkPoint->redo);
/*
- * Update pg_control so that any subsequent crash will restart from
- * this checkpoint. Note: ReadRecPtr gives the XLOG address of the
- * checkpoint record itself.
+ * Update pg_control so that any subsequent crash will restart from this
+ * checkpoint. Note: ReadRecPtr gives the XLOG address of the checkpoint
+ * record itself.
*/
ControlFile->prevCheckPoint = ControlFile->checkPoint;
ControlFile->checkPoint = ReadRecPtr;
void
xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
{
- uint8 info = xl_info & ~XLR_INFO_MASK;
+ uint8 info = xl_info & ~XLR_INFO_MASK;
if (info == XLOG_CHECKPOINT_SHUTDOWN ||
info == XLOG_CHECKPOINT_ONLINE)
CheckPoint *checkpoint = (CheckPoint *) rec;
appendStringInfo(buf, "checkpoint: redo %X/%X; undo %X/%X; "
- "tli %u; xid %u/%u; oid %u; multi %u; offset %u; %s",
- checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
- checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
- checkpoint->ThisTimeLineID,
- checkpoint->nextXidEpoch, checkpoint->nextXid,
- checkpoint->nextOid,
- checkpoint->nextMulti,
- checkpoint->nextMultiOffset,
- (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
+ "tli %u; xid %u/%u; oid %u; multi %u; offset %u; %s",
+ checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
+ checkpoint->undo.xlogid, checkpoint->undo.xrecoff,
+ checkpoint->ThisTimeLineID,
+ checkpoint->nextXidEpoch, checkpoint->nextXid,
+ checkpoint->nextOid,
+ checkpoint->nextMulti,
+ checkpoint->nextMultiOffset,
+ (info == XLOG_CHECKPOINT_SHUTDOWN) ? "shutdown" : "online");
}
else if (info == XLOG_NEXTOID)
{
for (i = 0; i < XLR_MAX_BKP_BLOCKS; i++)
{
if (record->xl_info & XLR_SET_BKP_BLOCK(i))
- appendStringInfo(buf, "; bkpb%d", i+1);
+ appendStringInfo(buf, "; bkpb%d", i + 1);
}
appendStringInfo(buf, ": %s", RmgrTable[record->xl_rmid].rm_name);
* Mark backup active in shared memory. We must do full-page WAL writes
* during an on-line backup even if not doing so at other times, because
* it's quite possible for the backup dump to obtain a "torn" (partially
- * written) copy of a database page if it reads the page concurrently
- * with our write to the same page. This can be fixed as long as the
- * first write to the page in the WAL sequence is a full-page write.
- * Hence, we turn on forcePageWrites and then force a CHECKPOINT, to
- * ensure there are no dirty pages in shared memory that might get
- * dumped while the backup is in progress without having a corresponding
- * WAL record. (Once the backup is complete, we need not force full-page
- * writes anymore, since we expect that any pages not modified during
- * the backup interval must have been correctly captured by the backup.)
+ * written) copy of a database page if it reads the page concurrently with
+ * our write to the same page. This can be fixed as long as the first
+ * write to the page in the WAL sequence is a full-page write. Hence, we
+ * turn on forcePageWrites and then force a CHECKPOINT, to ensure there
+ * are no dirty pages in shared memory that might get dumped while the
+ * backup is in progress without having a corresponding WAL record. (Once
+ * the backup is complete, we need not force full-page writes anymore,
+ * since we expect that any pages not modified during the backup interval
+ * must have been correctly captured by the backup.)
*
- * We must hold WALInsertLock to change the value of forcePageWrites,
- * to ensure adequate interlocking against XLogInsert().
+ * We must hold WALInsertLock to change the value of forcePageWrites, to
+ * ensure adequate interlocking against XLogInsert().
*/
LWLockAcquire(WALInsertLock, LW_EXCLUSIVE);
if (XLogCtl->Insert.forcePageWrites)
PG_TRY();
{
/*
- * Force a CHECKPOINT. Aside from being necessary to prevent torn
+ * Force a CHECKPOINT. Aside from being necessary to prevent torn
* page problems, this guarantees that two successive backup runs will
* have different checkpoint positions and hence different history
* file names, even if nothing happened in between.
LWLockRelease(WALInsertLock);
/*
- * Force a switch to a new xlog segment file, so that the backup
- * is valid as soon as archiver moves out the current segment file.
- * We'll report the end address of the XLOG SWITCH record as the backup
- * stopping point.
+ * Force a switch to a new xlog segment file, so that the backup is valid
+ * as soon as the archiver moves out the current segment file. We'll report

+ * the end address of the XLOG SWITCH record as the backup stopping point.
*/
stoppoint = RequestXLogSwitch();
BACKUP_LABEL_FILE)));
/*
- * Clean out any no-longer-needed history files. As a side effect,
- * this will post a .ready file for the newly created history file,
- * notifying the archiver that history file may be archived immediately.
+ * Clean out any no-longer-needed history files. As a side effect, this
+ * will post a .ready file for the newly created history file, notifying
+ * the archiver that the history file may be archived immediately.
*/
CleanupBackupHistory();
pg_switch_xlog(PG_FUNCTION_ARGS)
{
text *result;
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
char location[MAXFNAMELEN];
if (!superuser())
uint32 xrecoff;
XLogRecPtr locationpoint;
char xlogfilename[MAXFNAMELEN];
- Datum values[2];
- bool isnull[2];
- TupleDesc resultTupleDesc;
- HeapTuple resultHeapTuple;
- Datum result;
+ Datum values[2];
+ bool isnull[2];
+ TupleDesc resultTupleDesc;
+ HeapTuple resultHeapTuple;
+ Datum result;
/*
* Read input and parse
*/
locationstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(location)));
+ PointerGetDatum(location)));
if (sscanf(locationstr, "%X/%X", &uxlogid, &uxrecoff) != 2)
ereport(ERROR,
locationpoint.xrecoff = uxrecoff;
/*
- * Construct a tuple descriptor for the result row. This must match
- * this function's pg_proc entry!
+ * Construct a tuple descriptor for the result row. This must match this
+ * function's pg_proc entry!
*/
resultTupleDesc = CreateTemplateTupleDesc(2, false);
TupleDescInitEntry(resultTupleDesc, (AttrNumber) 1, "file_name",
char xlogfilename[MAXFNAMELEN];
locationstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(location)));
+ PointerGetDatum(location)));
if (sscanf(locationstr, "%X/%X", &uxlogid, &uxrecoff) != 2)
ereport(ERROR,
XLogFileName(xlogfilename, ThisTimeLineID, xlogid, xlogseg);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(xlogfilename)));
+ CStringGetDatum(xlogfilename)));
PG_RETURN_TEXT_P(result);
}
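To make the two conversions in these functions concrete, here is a small standalone sketch of going from a textual WAL location to a segment file name. The 16 MB segment size, timeline 1, and the %08X%08X%08X name layout mirror what XLogFileName produces, but they are hard-coded here only for illustration.

    #include <stdio.h>

    int
    main(void)
    {
        const char *locationstr = "2/3A000000";     /* hypothetical WAL location */
        unsigned int xlogid, xrecoff;
        unsigned int tli = 1;                       /* assumed timeline ID */
        unsigned int XLogSegSize = 16 * 1024 * 1024;
        char        fname[32];

        if (sscanf(locationstr, "%X/%X", &xlogid, &xrecoff) != 2)
            return 1;                               /* "could not parse" case */

        /* timeline, log id, and segment number, each as 8 hex digits */
        snprintf(fname, sizeof(fname), "%08X%08X%08X",
                 tli, xlogid, xrecoff / XLogSegSize);

        printf("%s\n", fname);                      /* 00000001000000020000003A */
        return 0;
    }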
static void
rm_redo_error_callback(void *arg)
{
- XLogRecord *record = (XLogRecord *) arg;
- StringInfoData buf;
+ XLogRecord *record = (XLogRecord *) arg;
+ StringInfoData buf;
initStringInfo(&buf);
RmgrTable[record->xl_rmid].rm_desc(&buf,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.47 2006/07/14 14:52:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.48 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
hash_seq_init(&status, invalid_page_tab);
/*
- * Our strategy is to emit WARNING messages for all remaining entries
- * and only PANIC after we've dumped all the available info.
+ * Our strategy is to emit WARNING messages for all remaining entries and
+ * only PANIC after we've dumped all the available info.
*/
while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
{
if (!init)
{
/* check that page has been initialized */
- Page page = (Page) BufferGetPage(buffer);
+ Page page = (Page) BufferGetPage(buffer);
if (PageIsNew((PageHeader) page))
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.224 2006/08/15 22:36:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.225 2006/10/04 00:29:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* When we are a dummy process, we aren't going to do the full
- * InitPostgres pushups, but there are a couple of things that need
- * to get lit up even in a dummy process.
+ * InitPostgres pushups, but there are a couple of things that need to get
+ * lit up even in a dummy process.
*/
if (IsUnderPostmaster)
{
case BS_XLOG_STARTUP:
bootstrap_signals();
StartupXLOG();
+
/*
* These next two functions don't consider themselves critical,
* but we'd best PANIC anyway if they fail.
* We postpone actually building the indexes until just before we're
* finished with initialization, however. This is because the indexes
* themselves have catalog entries, and those have to be included in the
- * indexes on those catalogs. Doing it in two phases is the simplest
+ * indexes on those catalogs. Doing it in two phases is the simplest
* way of making sure the indexes have the right contents at the end.
*/
void
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.131 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.132 2006/10/04 00:29:50 momjian Exp $
*
* NOTES
* See acl.h.
static AclMode string_to_privilege(const char *privname);
static const char *privilege_to_string(AclMode privilege);
static AclMode restrict_and_check_grant(bool is_grant, AclMode avail_goptions,
- bool all_privs, AclMode privileges,
- Oid objectId, Oid grantorId,
- AclObjectKind objkind, char *objname);
+ bool all_privs, AclMode privileges,
+ Oid objectId, Oid grantorId,
+ AclObjectKind objkind, char *objname);
static AclMode pg_aclmask(AclObjectKind objkind, Oid table_oid, Oid roleid,
AclMode mask, AclMaskHow how);
AclMode privileges, Oid objectId, Oid grantorId,
AclObjectKind objkind, char *objname)
{
- AclMode this_privileges;
- AclMode whole_mask;
+ AclMode this_privileges;
+ AclMode whole_mask;
switch (objkind)
{
}
/*
- * If we found no grant options, consider whether to issue a hard
- * error. Per spec, having any privilege at all on the object will
- * get you by here.
+ * If we found no grant options, consider whether to issue a hard error.
+ * Per spec, having any privilege at all on the object will get you by
+ * here.
*/
if (avail_goptions == ACL_NO_RIGHTS)
{
/*
* Restrict the operation to what we can actually grant or revoke, and
- * issue a warning if appropriate. (For REVOKE this isn't quite what
- * the spec says to do: the spec seems to want a warning only if no
- * privilege bits actually change in the ACL. In practice that
- * behavior seems much too noisy, as well as inconsistent with the
- * GRANT case.)
+ * issue a warning if appropriate. (For REVOKE this isn't quite what the
+ * spec says to do: the spec seems to want a warning only if no privilege
+ * bits actually change in the ACL. In practice that behavior seems much
+ * too noisy, as well as inconsistent with the GRANT case.)
*/
this_privileges = privileges & ACL_OPTION_TO_PRIVS(avail_goptions);
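As an aside, a tiny worked example of the masking on the line above, using made-up bits rather than the real ACL_* macros: if SELECT and UPDATE were requested but the grantor holds a grant option only for SELECT, the intersection keeps just SELECT, and the code that follows then issues the "not all privileges were granted" warning.

    #include <stdio.h>

    /* Hypothetical bit assignments for illustration only. */
    #define MY_SELECT   (1 << 0)
    #define MY_UPDATE   (1 << 1)

    int
    main(void)
    {
        unsigned int privileges = MY_SELECT | MY_UPDATE;    /* requested */
        unsigned int grantable = MY_SELECT;                 /* grant options held */
        unsigned int this_privileges = privileges & grantable;

        if (this_privileges == 0)
            printf("no privileges were granted\n");
        else if (this_privileges != privileges)
            printf("not all privileges were granted\n");
        return 0;
    }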
if (is_grant)
if (this_privileges == 0)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
- errmsg("no privileges were granted for \"%s\"", objname)));
+ errmsg("no privileges were granted for \"%s\"", objname)));
else if (!all_privs && this_privileges != privileges)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_GRANTED),
- errmsg("not all privileges were granted for \"%s\"", objname)));
+ errmsg("not all privileges were granted for \"%s\"", objname)));
}
else
{
if (this_privileges == 0)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
- errmsg("no privileges could be revoked for \"%s\"", objname)));
+ errmsg("no privileges could be revoked for \"%s\"", objname)));
else if (!all_privs && this_privileges != privileges)
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
*/
switch (stmt->objtype)
{
- /*
- * Because this might be a sequence, we test both relation
- * and sequence bits, and later do a more limited test
- * when we know the object type.
- */
+ /*
+ * Because this might be a sequence, we test both relation and
+ * sequence bits, and later do a more limited test when we know
+ * the object type.
+ */
case ACL_OBJECT_RELATION:
all_privileges = ACL_ALL_RIGHTS_RELATION | ACL_ALL_RIGHTS_SEQUENCE;
errormsg = _("invalid privilege type %s for relation");
if (stmt->privileges == NIL)
{
istmt.all_privs = true;
+
/*
* will be turned into ACL_ALL_RIGHTS_* by the internal routines
* depending on the object type
}
else
this_privileges = istmt->privileges;
-
+
/*
- * The GRANT TABLE syntax can be used for sequences and
- * non-sequences, so we have to look at the relkind to
- * determine the supported permissions. The OR of
- * table and sequence permissions were already checked.
+ * The GRANT TABLE syntax can be used for sequences and non-sequences,
+ * so we have to look at the relkind to determine the supported
+ * permissions. The OR of table and sequence permissions was already
+ * checked.
*/
if (istmt->objtype == ACL_OBJECT_RELATION)
{
if (pg_class_tuple->relkind == RELKIND_SEQUENCE)
{
/*
- * For backward compatibility, throw just a warning
- * for invalid sequence permissions when using the
- * non-sequence GRANT syntax is used.
+ * For backward compatibility, throw just a warning for
+ * invalid sequence permissions when the non-sequence
+ * GRANT syntax is used.
*/
if (this_privileges & ~((AclMode) ACL_ALL_RIGHTS_SEQUENCE))
{
/*
- * Mention the object name because the user needs to
- * know which operations succeeded. This is required
- * because WARNING allows the command to continue.
+ * Mention the object name because the user needs to know
+ * which operations succeeded. This is required because
+ * WARNING allows the command to continue.
*/
ereport(WARNING,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
else
{
if (this_privileges & ~((AclMode) ACL_ALL_RIGHTS_RELATION))
+
/*
- * USAGE is the only permission supported by sequences
- * but not by non-sequences. Don't mention the object
- * name because we didn't in the combined TABLE |
- * SEQUENCE check.
+ * USAGE is the only permission supported by sequences but
+ * not by non-sequences. Don't mention the object name
+ * because we didn't in the combined TABLE | SEQUENCE
+ * check.
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("invalid privilege type USAGE for table")));
+ errmsg("invalid privilege type USAGE for table")));
}
}
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
istmt->all_privs, this_privileges,
relOid, grantorId,
- pg_class_tuple->relkind == RELKIND_SEQUENCE
- ? ACL_KIND_SEQUENCE : ACL_KIND_CLASS,
+ pg_class_tuple->relkind == RELKIND_SEQUENCE
+ ? ACL_KIND_SEQUENCE : ACL_KIND_CLASS,
NameStr(pg_class_tuple->relname));
/*
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
&grantorId, &avail_goptions);
/*
- * Restrict the privileges to what we can actually grant, and emit
- * the standards-mandated warning and error messages.
+ * Restrict the privileges to what we can actually grant, and emit the
+ * standards-mandated warning and error messages.
*/
this_privileges =
restrict_and_check_grant(istmt->is_grant, avail_goptions,
*
* As of 7.4 we have some updatable system views; those shouldn't be
* protected in this way. Assume the view rules can take care of
- * themselves. ACL_USAGE is if we ever have system sequences.
+ * themselves. ACL_USAGE is included in case we ever have system sequences.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE | ACL_USAGE)) &&
IsSystemClass(classForm) &&
{
/* No ACL, so build default ACL */
acl = acldefault(classForm->relkind == RELKIND_SEQUENCE ?
- ACL_OBJECT_SEQUENCE : ACL_OBJECT_RELATION,
+ ACL_OBJECT_SEQUENCE : ACL_OBJECT_RELATION,
ownerId);
aclDatum = (Datum) 0;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.67 2006/07/31 20:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/catalog.c,v 1.68 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Hard-wiring this list is pretty grotty, but we really need it so that
* we can compute the locktag for a relation (and then lock it) without
- * having already read its pg_class entry. If we try to retrieve relisshared
+ * having already read its pg_class entry. If we try to retrieve relisshared
* from pg_class with no pre-existing lock, there is a race condition against
* anyone who is concurrently committing a change to the pg_class entry:
* since we read system catalog entries under SnapshotNow, it's possible
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.59 2006/08/20 21:56:16 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.60 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int numrefs; /* current number of references */
int maxrefs; /* current size of palloc'd array */
};
+
/* typedef ObjectAddresses appears in dependency.h */
/* for find_expr_references_walker */
{
ObjectAddresses *implicit;
ObjectAddresses *alreadyDeleted;
- Relation depRel;
- int i;
+ Relation depRel;
+ int i;
implicit = new_object_addresses();
alreadyDeleted = new_object_addresses();
continue;
/*
- * Add the objects dependent on this one to the global list of implicit
- * objects.
+ * Add the objects dependent on this one to the global list of
+ * implicit objects.
*/
findAutoDeletableObjects(&obj, implicit, depRel, false);
}
*/
/*
- * Step 3: delete the object itself, and save it to the list of
- * deleted objects if appropiate.
+ * Step 3: delete the object itself, and save it to the list of deleted
+ * objects if appropriate.
*/
doDeletion(object);
if (alreadyDeleted != NULL)
}
if (IsA(node, RelabelType))
{
- RelabelType *relab = (RelabelType *) node;
+ RelabelType *relab = (RelabelType *) node;
/* since there is no function dependency, need to depend on type */
add_object_address(OCLASS_TYPE, relab->resulttype, 0,
}
if (IsA(node, RowExpr))
{
- RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *rowexpr = (RowExpr *) node;
add_object_address(OCLASS_TYPE, rowexpr->row_typeid, 0,
context->addrs);
ObjectAddresses *
new_object_addresses(void)
{
- ObjectAddresses *addrs;
+ ObjectAddresses *addrs;
addrs = palloc(sizeof(ObjectAddresses));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.312 2006/08/02 01:59:44 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.313 2006/10/04 00:29:50 momjian Exp $
*
*
* INTERFACE ROUTINES
tup = heap_formtuple(RelationGetDescr(pg_class_desc), values, nulls);
/*
- * The new tuple must have the oid already chosen for the rel. Sure
- * would be embarrassing to do this sort of thing in polite company.
+ * The new tuple must have the oid already chosen for the rel. Sure would
+ * be embarrassing to do this sort of thing in polite company.
*/
HeapTupleSetOid(tup, new_rel_oid);
else
{
/*
- * Other relations will not have Xids in them, so set the initial value
- * to InvalidTransactionId.
+ * Other relations will not have Xids in them, so set the initial
+ * value to InvalidTransactionId.
*/
new_rel_reltup->relminxid = InvalidTransactionId;
new_rel_reltup->relvacuumxid = InvalidTransactionId;
* with the heap relation to zero tuples.
*
* The routine will truncate and then reconstruct the indexes on
- * the specified relation. Caller must hold exclusive lock on rel.
+ * the specified relation. Caller must hold exclusive lock on rel.
*/
static void
RelationTruncateIndexes(Relation heapRelation)
return;
/*
- * Otherwise, must scan pg_constraint. We make one pass with all the
+ * Otherwise, must scan pg_constraint. We make one pass with all the
* relations considered; if this finds nothing, then all is well.
*/
dependents = heap_truncate_find_FKs(oids);
*/
foreach(cell, oids)
{
- Oid relid = lfirst_oid(cell);
- ListCell *cell2;
+ Oid relid = lfirst_oid(cell);
+ ListCell *cell2;
dependents = heap_truncate_find_FKs(list_make1_oid(relid));
foreach(cell2, dependents)
{
- Oid relid2 = lfirst_oid(cell2);
+ Oid relid2 = lfirst_oid(cell2);
if (!list_member_oid(oids, relid2))
{
- char *relname = get_rel_name(relid);
- char *relname2 = get_rel_name(relid2);
+ char *relname = get_rel_name(relid);
+ char *relname2 = get_rel_name(relid2);
if (tempTables)
ereport(ERROR,
errmsg("cannot truncate a table referenced in a foreign key constraint"),
errdetail("Table \"%s\" references \"%s\".",
relname2, relname),
- errhint("Truncate table \"%s\" at the same time, "
- "or use TRUNCATE ... CASCADE.",
- relname2)));
+ errhint("Truncate table \"%s\" at the same time, "
+ "or use TRUNCATE ... CASCADE.",
+ relname2)));
}
}
}
* behavior to change depending on chance locations of rows in pg_constraint.)
*
* Note: caller should already have appropriate lock on all rels mentioned
- * in relationIds. Since adding or dropping an FK requires exclusive lock
+ * in relationIds. Since adding or dropping an FK requires exclusive lock
* on both rels, this ensures that the answer will be stable.
*/
List *
HeapTuple tuple;
/*
- * Must scan pg_constraint. Right now, it is a seqscan because
- * there is no available index on confrelid.
+ * Must scan pg_constraint. Right now, it is a seqscan because there is
+ * no available index on confrelid.
*/
fkeyRel = heap_open(ConstraintRelationId, AccessShareLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.273 2006/08/25 04:06:46 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.274 2006/10/04 00:29:50 momjian Exp $
*
*
* INTERFACE ROUTINES
{
Tuplesortstate *tuplesort; /* for sorting the index TIDs */
/* statistics (for debug purposes only): */
- double htups, itups, tups_inserted;
+ double htups,
+ itups,
+ tups_inserted;
} v_i_state;
/* non-export function prototypes */
* allow_system_table_mods: allow table to be a system catalog
* skip_build: true to skip the index_build() step for the moment; caller
* must do it later (typically via reindex_index())
- * concurrent: if true, do not lock the table against writers. The index
+ * concurrent: if true, do not lock the table against writers. The index
* will be marked "invalid" and the caller must take additional steps
* to fix it up.
*
* (but not VACUUM).
*/
heapRelation = heap_open(heapRelationId,
- (concurrent ? ShareUpdateExclusiveLock : ShareLock));
+ (concurrent ? ShareUpdateExclusiveLock : ShareLock));
/*
* The index will be in the same namespace as its parent table, and is
errmsg("user-defined indexes on system catalog tables are not supported")));
/*
- * concurrent index build on a system catalog is unsafe because we tend
- * to release locks before committing in catalogs
+ * concurrent index build on a system catalog is unsafe because we tend to
+ * release locks before committing in catalogs
*/
if (concurrent &&
IsSystemRelation(heapRelation))
{
/*
* Caller is responsible for filling the index later on. However,
- * we'd better make sure that the heap relation is correctly marked
- * as having an index.
+ * we'd better make sure that the heap relation is correctly marked as
+ * having an index.
*/
index_update_stats(heapRelation,
true,
/* other info */
ii->ii_Unique = indexStruct->indisunique;
- ii->ii_Concurrent = false; /* assume normal case */
+ ii->ii_Concurrent = false; /* assume normal case */
return ii;
}
* index_update_stats --- update pg_class entry after CREATE INDEX or REINDEX
*
* This routine updates the pg_class row of either an index or its parent
- * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
+ * relation after CREATE INDEX or REINDEX. Its rather bizarre API is designed
* to ensure we can do all the necessary work in just one update.
*
* hasindex: set relhasindex to this value
*
* NOTE: an important side-effect of this operation is that an SI invalidation
* message is sent out to all backends --- including me --- causing relcache
- * entries to be flushed or updated with the new data. This must happen even
+ * entries to be flushed or updated with the new data. This must happen even
* if we find that no change is needed in the pg_class row. When updating
* a heap entry, this ensures that other backends find out about the new
* index. When updating an index, it's important because some index AMs
index_update_stats(Relation rel, bool hasindex, bool isprimary,
Oid reltoastidxid, double reltuples)
{
- BlockNumber relpages = RelationGetNumberOfBlocks(rel);
+ BlockNumber relpages = RelationGetNumberOfBlocks(rel);
Oid relid = RelationGetRelid(rel);
Relation pg_class;
HeapTuple tuple;
*
* 1. In bootstrap mode, we have no choice --- UPDATE wouldn't work.
*
- * 2. We could be reindexing pg_class itself, in which case we can't
- * move its pg_class row because CatalogUpdateIndexes might not know
- * about all the indexes yet (see reindex_relation).
+ * 2. We could be reindexing pg_class itself, in which case we can't move
+ * its pg_class row because CatalogUpdateIndexes might not know about all
+ * the indexes yet (see reindex_relation).
*
* 3. Because we execute CREATE INDEX with just share lock on the parent
* rel (to allow concurrent index creations), an ordinary update could
* 4. Even with just a single CREATE INDEX, there's a risk factor because
* someone else might be trying to open the rel while we commit, and this
* creates a race condition as to whether he will see both or neither of
- * the pg_class row versions as valid. Again, a non-transactional update
+ * the pg_class row versions as valid. Again, a non-transactional update
* avoids the risk. It is indeterminate which state of the row the other
* process will see, but it doesn't matter (if he's only taking
* AccessShareLock, then it's not critical that he see relhasindex true).
*
* It is safe to use a non-transactional update even though our
- * transaction could still fail before committing. Setting relhasindex
+ * transaction could still fail before committing. Setting relhasindex
* true is safe even if there are no indexes (VACUUM will eventually fix
- * it), and of course the relpages and reltuples counts are correct (or
- * at least more so than the old values) regardless.
+ * it), and of course the relpages and reltuples counts are correct (or at
+ * least more so than the old values) regardless.
*/
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
/*
- * Make a copy of the tuple to update. Normally we use the syscache,
- * but we can't rely on that during bootstrap or while reindexing
- * pg_class itself.
+ * Make a copy of the tuple to update. Normally we use the syscache, but
+ * we can't rely on that during bootstrap or while reindexing pg_class
+ * itself.
*/
if (IsBootstrapProcessingMode() ||
ReindexIsProcessingHeap(RelationRelationId))
* index_build - invoke access-method-specific index build procedure
*
* On entry, the index's catalog entries are valid, and its physical disk
- * file has been created but is empty. We call the AM-specific build
+ * file has been created but is empty. We call the AM-specific build
* procedure to fill in the index contents. We then update the pg_class
* entries of the index and heap relation as needed, using statistics
* returned by ambuild as well as data passed by the caller.
estate);
/*
- * Prepare for scan of the base relation. In a normal index build,
- * we use SnapshotAny because we must retrieve all tuples and do our own
- * time qual checks (because we have to index RECENTLY_DEAD tuples).
- * In a concurrent build, we take a regular MVCC snapshot and index
- * whatever's live according to that. During bootstrap we just use
- * SnapshotNow.
+ * Prepare for scan of the base relation. In a normal index build, we use
+ * SnapshotAny because we must retrieve all tuples and do our own time
+ * qual checks (because we have to index RECENTLY_DEAD tuples). In a
+ * concurrent build, we take a regular MVCC snapshot and index whatever's
+ * live according to that. During bootstrap we just use SnapshotNow.
*/
if (IsBootstrapProcessingMode())
{
snapshot = SnapshotNow;
- OldestXmin = InvalidTransactionId; /* not used */
+ OldestXmin = InvalidTransactionId; /* not used */
}
else if (indexInfo->ii_Concurrent)
{
snapshot = CopySnapshot(GetTransactionSnapshot());
- OldestXmin = InvalidTransactionId; /* not used */
+ OldestXmin = InvalidTransactionId; /* not used */
}
else
{
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
- 0, /* number of keys */
- NULL); /* scan key */
+ 0, /* number of keys */
+ NULL); /* scan key */
reltuples = 0;
/*
* If tuple is recently deleted then we must index it
* anyway to preserve MVCC semantics. (Pre-existing
- * transactions could try to use the index after we
- * finish building it, and may need to see such tuples.)
- * Exclude it from unique-checking, however.
+ * transactions could try to use the index after we finish
+ * building it, and may need to see such tuples.) Exclude
+ * it from unique-checking, however.
*/
indexIt = true;
tupleIsAlive = false;
* which is passed to validate_index(). Any tuples that are valid according
* to this snap, but are not in the index, must be added to the index.
* (Any tuples committed live after the snap will be inserted into the
- * index by their originating transaction. Any tuples committed dead before
+ * index by their originating transaction. Any tuples committed dead before
* the snap need not be indexed, because we will wait out all transactions
* that might care about them before we mark the index valid.)
*
* ever say "delete it". (This should be faster than a plain indexscan;
* also, not all index AMs support full-index indexscan.) Then we sort the
* TIDs, and finally scan the table doing a "merge join" against the TID list
- * to see which tuples are missing from the index. Thus we will ensure that
+ * to see which tuples are missing from the index. Thus we will ensure that
* all tuples valid according to the reference snapshot are in the index.
*
* Building a unique index this way is tricky: we might try to insert a
* were alive at the time of the reference snapshot are gone; this is
* necessary to be sure there are none left with a serializable snapshot
* older than the reference (and hence possibly able to see tuples we did
- * not index). Then we mark the index valid and commit.
+ * not index). Then we mark the index valid and commit.
*
* Doing two full table scans is a brute-force strategy. We could try to be
* cleverer, eg storing new tuples in a special area of the table (perhaps
void
validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
{
- Relation heapRelation, indexRelation;
- IndexInfo *indexInfo;
+ Relation heapRelation,
+ indexRelation;
+ IndexInfo *indexInfo;
IndexVacuumInfo ivinfo;
- v_i_state state;
+ v_i_state state;
/* Open and lock the parent heap relation */
heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
indexRelation = index_open(indexId, RowExclusiveLock);
/*
- * Fetch info needed for index_insert. (You might think this should
- * be passed in from DefineIndex, but its copy is long gone due to
- * having been built in a previous transaction.)
+ * Fetch info needed for index_insert. (You might think this should be
+ * passed in from DefineIndex, but its copy is long gone due to having
+ * been built in a previous transaction.)
*/
indexInfo = BuildIndexInfo(indexRelation);
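The "merge join" between the sorted index TIDs and the heap scan, described in the comments above and carried out below in validate_index_heapscan, follows a simple pattern. This sketch shows only the control flow; it assumes postgres.h and storage/itemptr.h, and uses plain arrays in place of the tuplesort and the heap scan.

    /* Sketch only: find heap TIDs that are absent from the sorted index TIDs. */
    static void
    merge_find_missing(ItemPointerData *heap_tids, int nheap,
                       ItemPointerData *index_tids, int nindex)
    {
        int         h;
        int         i = 0;          /* cursor into the sorted index TIDs */

        for (h = 0; h < nheap; h++)
        {
            /* skip index entries that precede the current heap tuple */
            while (i < nindex &&
                   ItemPointerCompare(&index_tids[i], &heap_tids[h]) < 0)
                i++;

            if (i < nindex &&
                ItemPointerCompare(&index_tids[i], &heap_tids[h]) == 0)
                i++;                /* already present in the index */
            else
            {
                /* heap tuple is missing from the index: insert it here */
            }
        }
    }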
static bool
validate_index_callback(ItemPointer itemptr, void *opaque)
{
- v_i_state *state = (v_i_state *) opaque;
+ v_i_state *state = (v_i_state *) opaque;
tuplesort_putdatum(state->tuplesort, PointerGetDatum(itemptr), false);
state->itups += 1;
TupleTableSlot *slot;
EState *estate;
ExprContext *econtext;
+
/* state variables for the merge */
ItemPointer indexcursor = NULL;
- bool tuplesort_empty = false;
+ bool tuplesort_empty = false;
/*
* sanity checks
*/
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
- 0, /* number of keys */
- NULL); /* scan key */
+ 0, /* number of keys */
+ NULL); /* scan key */
/*
* Scan all tuples matching the snapshot.
state->htups += 1;
/*
- * "merge" by skipping through the index tuples until we find or
- * pass the current heap tuple.
+ * "merge" by skipping through the index tuples until we find or pass
+ * the current heap tuple.
*/
while (!tuplesort_empty &&
(!indexcursor ||
ItemPointerCompare(indexcursor, heapcursor) < 0))
{
- Datum ts_val;
- bool ts_isnull;
+ Datum ts_val;
+ bool ts_isnull;
if (indexcursor)
pfree(indexcursor);
* We've overshot which means this heap tuple is missing from the
* index, so insert it.
*/
- bool check_unique;
+ bool check_unique;
MemoryContextReset(econtext->ecxt_per_tuple_memory);
/*
* For the current heap tuple, extract all the attributes we use
- * in this index, and note which are null. This also performs
+ * in this index, and note which are null. This also performs
* evaluation of any expressions needed.
*/
FormIndexDatum(indexInfo,
isnull);
/*
- * If the tuple is already committed dead, we still have to
- * put it in the index (because some xacts might be able to
- * see it), but we might as well suppress uniqueness checking.
- * This is just an optimization because the index AM is not
- * supposed to raise a uniqueness failure anyway.
+ * If the tuple is already committed dead, we still have to put it
+ * in the index (because some xacts might be able to see it), but
+ * we might as well suppress uniqueness checking. This is just an
+ * optimization because the index AM is not supposed to raise a
+ * uniqueness failure anyway.
*/
if (indexInfo->ii_Unique)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.87 2006/09/14 22:05:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.88 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ObjectIdGetDatum(namespaceId));
if (HeapTupleIsValid(opertup))
{
- Oid result = HeapTupleGetOid(opertup);
+ Oid result = HeapTupleGetOid(opertup);
ReleaseSysCache(opertup);
return result;
}
/*
- * We have to find the list member that is first in the search path,
- * if there's more than one. This doubly-nested loop looks ugly,
- * but in practice there should usually be few catlist members.
+ * We have to find the list member that is first in the search path, if
+ * there's more than one. This doubly-nested loop looks ugly, but in
+ * practice there should usually be few catlist members.
*/
recomputeNamespacePath();
if (operform->oprnamespace == namespaceId)
{
- Oid result = HeapTupleGetOid(opertup);
+ Oid result = HeapTupleGetOid(opertup);
ReleaseSysCacheList(catlist);
return result;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.82 2006/07/27 19:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.83 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * If transtype is polymorphic, must have polymorphic argument also;
- * else we will have no way to deduce the actual transtype.
+ * If transtype is polymorphic, must have polymorphic argument also; else
+ * we will have no way to deduce the actual transtype.
*/
if (!hasPolyArg &&
(aggTransType == ANYARRAYOID || aggTransType == ANYELEMENTOID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition data type"),
- errdetail("An aggregate using \"anyarray\" or \"anyelement\" as transition type must have at least one argument of either type.")));
+ errdetail("An aggregate using \"anyarray\" or \"anyelement\" as transition type must have at least one argument of either type.")));
/* find the transfn */
nargs_transfn = numArgs + 1;
/*
* If the transfn is strict and the initval is NULL, make sure first input
- * type and transtype are the same (or at least binary-compatible), so that
- * it's OK to use the first input value as the initial transValue.
+ * type and transtype are the same (or at least binary-compatible), so
+ * that it's OK to use the first input value as the initial transValue.
*/
if (proc->proisstrict && agginitval == NULL)
{
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
buildoidvector(aggArgTypes,
- numArgs), /* paramTypes */
+ numArgs), /* paramTypes */
PointerGetDatum(NULL), /* allParamTypes */
PointerGetDatum(NULL), /* parameterModes */
PointerGetDatum(NULL)); /* parameterNames */
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("function %s requires run-time type coercion",
- func_signature_string(fnName, nargs, true_oid_array))));
+ func_signature_string(fnName, nargs, true_oid_array))));
}
/* Check aggregate creator has permission to call the function */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.22 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.23 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Detect whether a sequence is marked as "owned" by a column
*
* An ownership marker is an AUTO dependency from the sequence to the
- * column. If we find one, store the identity of the owning column
+ * column. If we find one, store the identity of the owning column
* into *tableId and *colId and return TRUE; else return FALSE.
*
* Note: if there's more than one such pg_depend entry then you get
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.14 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.15 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
while ((tuple = systable_getnext(scan)) != NULL)
{
- ObjectAddress obj;
- GrantObjectType objtype;
- InternalGrant istmt;
+ ObjectAddress obj;
+ GrantObjectType objtype;
+ InternalGrant istmt;
Form_pg_shdepend sdepForm = (Form_pg_shdepend) GETSTRUCT(tuple);
/* We only operate on objects in the current database */
switch (sdepForm->deptype)
{
- /* Shouldn't happen */
+ /* Shouldn't happen */
case SHARED_DEPENDENCY_PIN:
case SHARED_DEPENDENCY_INVALID:
elog(ERROR, "unexpected dependency type");
break;
case RelationRelationId:
+
/*
- * Pass recursing = true so that we don't fail on
- * indexes, owned sequences, etc when we happen
- * to visit them before their parent table.
+ * Pass recursing = true so that we don't fail on indexes,
+ * owned sequences, etc when we happen to visit them
+ * before their parent table.
*/
ATExecChangeOwner(sdepForm->objid, newrole, true);
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.107 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.108 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* initialize *values with the type name and dummy values
*
- * The representational details are the same as int4 ... it doesn't
- * really matter what they are so long as they are consistent. Also
- * note that we give it typtype = 'p' (pseudotype) as extra insurance
- * that it won't be mistaken for a usable type.
+ * The representational details are the same as int4 ... it doesn't really
+ * matter what they are so long as they are consistent. Also note that we
+ * give it typtype = 'p' (pseudotype) as extra insurance that it won't be
+ * mistaken for a usable type.
*/
i = 0;
namestrcpy(&name, typeName);
values[i++] = NameGetDatum(&name); /* typname */
values[i++] = ObjectIdGetDatum(typeNamespace); /* typnamespace */
values[i++] = ObjectIdGetDatum(GetUserId()); /* typowner */
- values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
- values[i++] = BoolGetDatum(true); /* typbyval */
- values[i++] = CharGetDatum('p'); /* typtype */
- values[i++] = BoolGetDatum(false); /* typisdefined */
+ values[i++] = Int16GetDatum(sizeof(int4)); /* typlen */
+ values[i++] = BoolGetDatum(true); /* typbyval */
+ values[i++] = CharGetDatum('p'); /* typtype */
+ values[i++] = BoolGetDatum(false); /* typisdefined */
values[i++] = CharGetDatum(DEFAULT_TYPDELIM); /* typdelim */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typelem */
- values[i++] = ObjectIdGetDatum(F_SHELL_IN); /* typinput */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typrelid */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typelem */
+ values[i++] = ObjectIdGetDatum(F_SHELL_IN); /* typinput */
values[i++] = ObjectIdGetDatum(F_SHELL_OUT); /* typoutput */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typreceive */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typsend */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typanalyze */
- values[i++] = CharGetDatum('i'); /* typalign */
- values[i++] = CharGetDatum('p'); /* typstorage */
- values[i++] = BoolGetDatum(false); /* typnotnull */
- values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
- values[i++] = Int32GetDatum(-1); /* typtypmod */
- values[i++] = Int32GetDatum(0); /* typndims */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typreceive */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typsend */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typanalyze */
+ values[i++] = CharGetDatum('i'); /* typalign */
+ values[i++] = CharGetDatum('p'); /* typstorage */
+ values[i++] = BoolGetDatum(false); /* typnotnull */
+ values[i++] = ObjectIdGetDatum(InvalidOid); /* typbasetype */
+ values[i++] = Int32GetDatum(-1); /* typtypmod */
+ values[i++] = Int32GetDatum(0); /* typndims */
nulls[i++] = 'n'; /* typdefaultbin */
nulls[i++] = 'n'; /* typdefault */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.2 2006/08/25 04:06:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/toasting.c,v 1.3 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
BootstrapToastTable(char *relName, Oid toastOid, Oid toastIndexOid)
{
- Relation rel;
+ Relation rel;
rel = heap_openrv(makeRangeVar(NULL, relName), AccessExclusiveLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.40 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.41 2006/10/04 00:29:50 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
TypeName *transType = NULL;
char *initval = NULL;
Oid *aggArgTypes;
- int numArgs;
+ int numArgs;
Oid transTypeId;
ListCell *pl;
if (oldstyle)
{
/*
- * Old style: use basetype parameter. This supports aggregates
- * of zero or one input, with input type ANY meaning zero inputs.
+ * Old style: use basetype parameter. This supports aggregates of
+ * zero or one input, with input type ANY meaning zero inputs.
*
* Historically we allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
/*
* New style: args is a list of TypeNames (possibly zero of 'em).
*/
- ListCell *lc;
- int i = 0;
+ ListCell *lc;
+ int i = 0;
if (baseType != NULL)
ereport(ERROR,
aggArgTypes = (Oid *) palloc(sizeof(Oid) * numArgs);
foreach(lc, args)
{
- TypeName *curTypeName = (TypeName *) lfirst(lc);
+ TypeName *curTypeName = (TypeName *) lfirst(lc);
aggArgTypes[i++] = typenameTypeId(NULL, curTypeName);
}
/*
* look up the aggregate's transtype.
*
- * transtype can't be a pseudo-type, since we need to be
- * able to store values of the transtype. However, we can allow
- * polymorphic transtype in some cases (AggregateCreate will check).
+ * transtype can't be a pseudo-type, since we need to be able to store
+ * values of the transtype. However, we can allow polymorphic transtype
+ * in some cases (AggregateCreate will check).
*/
transTypeId = typenameTypeId(NULL, transType);
if (get_typtype(transTypeId) == 'p' &&
*/
AggregateCreate(aggName, /* aggregate name */
aggNamespace, /* namespace */
- aggArgTypes, /* input data type(s) */
+ aggArgTypes, /* input data type(s) */
numArgs,
transfuncName, /* step function name */
finalfuncName, /* final function name */
errmsg("function %s already exists in schema \"%s\"",
funcname_signature_string(newname,
procForm->pronargs,
- procForm->proargtypes.values),
+ procForm->proargtypes.values),
get_namespace_name(namespaceOid))));
/* must be owner */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.98 2006/09/17 22:50:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.99 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
CHECK_FOR_INTERRUPTS();
/*
- * Open the relation, getting ShareUpdateExclusiveLock to ensure that
- * two ANALYZEs don't run on it concurrently. (This also locks out
- * a concurrent VACUUM, which doesn't matter much at the moment but
- * might matter if we ever try to accumulate stats on dead tuples.)
- * If the rel has been dropped since we last saw it, we don't need
- * to process it.
+ * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
+ * ANALYZEs don't run on it concurrently. (This also locks out a
+ * concurrent VACUUM, which doesn't matter much at the moment but might
+ * matter if we ever try to accumulate stats on dead tuples.) If the rel
+ * has been dropped since we last saw it, we don't need to process it.
*/
onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
if (!onerel)
if (i == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- col, RelationGetRelationName(onerel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ col, RelationGetRelationName(onerel))));
vacattrstats[tcnt] = examine_attribute(onerel, i);
if (vacattrstats[tcnt] != NULL)
tcnt++;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.153 2006/08/18 16:09:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.154 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
tuple = SearchSysCache(INDEXRELID,
ObjectIdGetDatum(rvtc->indexOid),
0, 0, 0);
- if (!HeapTupleIsValid(tuple)) /* probably can't happen */
+ if (!HeapTupleIsValid(tuple)) /* probably can't happen */
{
relation_close(OldHeap, AccessExclusiveLock);
return;
errmsg("cannot cluster on partial index \"%s\"",
RelationGetRelationName(OldIndex))));
- if (!OldIndex->rd_am->amclusterable)
+ if (!OldIndex->rd_am->amclusterable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot cluster on index \"%s\" because access method does not support clustering",
* Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.91 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.92 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* If the comment given is null or an empty string, instead delete any
* existing comment for the specified key.
*/
-void CreateSharedComments(Oid oid, Oid classoid, char *comment)
+void
+CreateSharedComments(Oid oid, Oid classoid, char *comment)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
HeapTuple newtuple = NULL;
Datum values[Natts_pg_shdescription];
/* Use the index to search for a matching old tuple */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
{
else
{
newtuple = heap_modifytuple(oldtuple, RelationGetDescr(shdescription),
- values, nulls, replaces);
+ values, nulls, replaces);
simple_heap_update(shdescription, &oldtuple->t_self, newtuple);
}
- break; /* Assume there can be only one match */
+ break; /* Assume there can be only one match */
}
systable_endscan(sd);
if (newtuple == NULL && comment != NULL)
{
newtuple = heap_formtuple(RelationGetDescr(shdescription),
- values, nulls);
+ values, nulls);
simple_heap_insert(shdescription, newtuple);
}
DeleteSharedComments(Oid oid, Oid classoid)
{
Relation shdescription;
- ScanKeyData skey[2];
- SysScanDesc sd;
+ ScanKeyData skey[2];
+ SysScanDesc sd;
HeapTuple oldtuple;
/* Use the index to search for all matching old tuples */
ScanKeyInit(&skey[0],
- Anum_pg_shdescription_objoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(oid));
+ Anum_pg_shdescription_objoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(oid));
ScanKeyInit(&skey[1],
- Anum_pg_shdescription_classoid,
- BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(classoid));
+ Anum_pg_shdescription_classoid,
+ BTEqualStrategyNumber, F_OIDEQ,
+ ObjectIdGetDatum(classoid));
shdescription = heap_open(SharedDescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(shdescription, SharedDescriptionObjIndexId, true,
- SnapshotNow, 2, skey);
+ SnapshotNow, 2, skey);
while ((oldtuple = systable_getnext(sd)) != NULL)
simple_heap_delete(shdescription, &oldtuple->t_self);
static void
CommentTablespace(List *qualname, char *comment)
{
- char *tablespace;
- Oid oid;
+ char *tablespace;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
static void
CommentRole(List *qualname, char *comment)
{
- char *role;
- Oid oid;
+ char *role;
+ Oid oid;
if (list_length(qualname) != 1)
ereport(ERROR,
if (!has_privs_of_role(GetUserId(), oid))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be member of role \"%s\" to comment upon it", role)));
+ errmsg("must be member of role \"%s\" to comment upon it", role)));
/* Call CreateSharedComments() to create/drop the comments */
CreateSharedComments(oid, AuthIdRelationId, comment);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.271 2006/08/31 03:17:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.272 2006/10/04 00:29:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* low-level state data */
CopyDest copy_dest; /* type of copy source/destination */
FILE *copy_file; /* used if copy_dest == COPY_FILE */
- StringInfo fe_msgbuf; /* used for all dests during COPY TO, only
- * for dest == COPY_NEW_FE in COPY FROM */
+ StringInfo fe_msgbuf; /* used for all dests during COPY TO, only for
+ * dest == COPY_NEW_FE in COPY FROM */
bool fe_copy; /* true for all FE copy dests */
bool fe_eof; /* true if detected end of copy data */
EolType eol_type; /* EOL type of input */
bool header_line; /* CSV header line? */
char *null_print; /* NULL marker string (server encoding!) */
int null_print_len; /* length of same */
- char *null_print_client; /* same converted to client encoding */
+ char *null_print_client; /* same converted to client encoding */
char *delim; /* column delimiter (must be 1 byte) */
char *quote; /* CSV quote char (must be 1 byte) */
char *escape; /* CSV escape char (must be 1 byte) */
/*
* Working state for COPY TO
*/
- FmgrInfo *out_functions; /* lookup info for output functions */
- MemoryContext rowcontext; /* per-row evaluation context */
+ FmgrInfo *out_functions; /* lookup info for output functions */
+ MemoryContext rowcontext; /* per-row evaluation context */
/*
* These variables are used to reduce overhead in textual COPY FROM.
* function call overhead in tight COPY loops.
*
* We must use "if (1)" because "do {} while(0)" overrides the continue/break
- * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
+ * processing. See http://www.cit.gu.edu.au/~anthony/info/C/C.macros.
*/
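The point of that macro comment is easiest to see in a tiny standalone program (not PostgreSQL code; the macro names below are made up for illustration): a continue inside a do-while(0) wrapper only exits the do-while, so the caller's loop body still runs, while the "if (1) ... else" form lets continue reach the enclosing loop.

#include <stdio.h>

/* continue inside do-while(0) binds to the do-while, not the caller's loop */
#define SKIP_ODD_BROKEN(x)  do { if ((x) % 2) continue; } while (0)
/* the "if (1) ... else" style used by the COPY macros preserves continue/break */
#define SKIP_ODD_OK(x)      if ((x) % 2) continue; else ((void) 0)

int
main(void)
{
	int			i;

	for (i = 0; i < 4; i++)
	{
		SKIP_ODD_BROKEN(i);		/* odd values are NOT skipped */
		printf("broken: %d\n", i);
	}
	for (i = 0; i < 4; i++)
	{
		SKIP_ODD_OK(i);			/* odd values are skipped as intended */
		printf("ok: %d\n", i);
	}
	return 0;
}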
/*
static void DoCopyTo(CopyState cstate);
static void CopyTo(CopyState cstate);
static void CopyOneRowTo(CopyState cstate, Oid tupleOid,
- Datum *values, bool *nulls);
+ Datum *values, bool *nulls);
static void CopyFrom(CopyState cstate);
static bool CopyReadLine(CopyState cstate);
static bool CopyReadLineText(CopyState cstate);
static void CopyAttributeOutCSV(CopyState cstate, char *string,
bool use_quote, bool single_attr);
static List *CopyGetAttnums(TupleDesc tupDesc, Relation rel,
- List *attnamelist);
+ List *attnamelist);
static char *limit_printout_length(const char *str);
/* Low-level communications functions */
/* Disallow end-of-line characters */
if (strchr(cstate->delim, '\r') != NULL ||
- strchr(cstate->delim, '\n') != NULL)
+ strchr(cstate->delim, '\n') != NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("COPY delimiter cannot be newline or carriage return")));
+ errmsg("COPY delimiter cannot be newline or carriage return")));
if (strchr(cstate->null_print, '\r') != NULL ||
strchr(cstate->null_print, '\n') != NULL)
/* Open and lock the relation, using the appropriate lock type. */
cstate->rel = heap_openrv(stmt->relation,
- (is_from ? RowExclusiveLock : AccessShareLock));
+ (is_from ? RowExclusiveLock : AccessShareLock));
/* Check relation permissions. */
aclresult = pg_class_aclcheck(RelationGetRelid(cstate->rel),
* rewriting or planning. Do that now.
*
* Because the planner is not cool about not scribbling on its input,
- * we make a preliminary copy of the source querytree. This prevents
+ * we make a preliminary copy of the source querytree. This prevents
* problems in the case that the COPY is in a portal or plpgsql
* function and is executed repeatedly. (See also the same hack in
* EXPLAIN, DECLARE CURSOR and PREPARE.) XXX the planner really
/*
* Set up encoding conversion info. Even if the client and server
- * encodings are the same, we must apply pg_client_to_server() to
- * validate data in multibyte encodings.
+ * encodings are the same, we must apply pg_client_to_server() to validate
+ * data in multibyte encodings.
*/
cstate->client_encoding = pg_get_client_encoding();
cstate->need_transcoding =
if (is_from) /* copy from file to database */
CopyFrom(cstate);
- else /* copy from database to file */
+ else
+ /* copy from database to file */
DoCopyTo(cstate);
/*
}
else
{
- mode_t oumask; /* Pre-existing umask value */
+ mode_t oumask; /* Pre-existing umask value */
struct stat st;
/*
- * Prevent write to relative path ... too easy to shoot oneself in
- * the foot by overwriting a database file ...
+ * Prevent write to relative path ... too easy to shoot oneself in the
+ * foot by overwriting a database file ...
*/
if (!is_absolute_path(cstate->filename))
ereport(ERROR,
*/
if (cstate->need_transcoding)
cstate->null_print_client = pg_server_to_client(cstate->null_print,
- cstate->null_print_len);
+ cstate->null_print_len);
/* if a header has been requested send the line */
if (cstate->header_line)
CopySendEndOfRow(cstate);
MemoryContextSwitchTo(oldcontext);
-
+
cstate->processed++;
}
bool hit_eof = false;
bool result = false;
char mblen_str[2];
+
/* CSV variables */
bool first_char_in_line = true;
bool in_quote = false,
* assumed the same in frontend and backend encodings.
*
* For speed, we try to move data from raw_buf to line_buf in chunks
- * rather than one character at a time. raw_buf_ptr points to the next
+ * rather than one character at a time. raw_buf_ptr points to the next
* character to examine; any characters from raw_buf_index to raw_buf_ptr
- * have been determined to be part of the line, but not yet transferred
- * to line_buf.
+ * have been determined to be part of the line, but not yet transferred to
+ * line_buf.
*
* For a little extra speed within the loop, we copy raw_buf and
* raw_buf_len into local variables.
char c;
/*
- * Load more data if needed. Ideally we would just force four bytes
- * of read-ahead and avoid the many calls to
- * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE
- * protocol does not allow us to read too far ahead or we might
- * read into the next data, so we read-ahead only as far we know
- * we can. One optimization would be to read-ahead four byte here
- * if cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
- * considering the size of the buffer.
+ * Load more data if needed. Ideally we would just force four bytes
+ * of read-ahead and avoid the many calls to
+ * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
+ * does not allow us to read too far ahead or we might read into the
+ * next data, so we read-ahead only as far as we know we can. One
+ * optimization would be to read-ahead four bytes here if
+ * cstate->copy_dest != COPY_OLD_FE, but it hardly seems worth it,
+ * considering the size of the buffer.
*/
if (raw_buf_ptr >= copy_buf_len || need_data)
{
{
/*
* If character is '\\' or '\r', we may need to look ahead below.
- * Force fetch of the next character if we don't already have it. We
- * need to do this before changing CSV state, in case one of these
- * characters is also the quote or escape character.
+ * Force fetch of the next character if we don't already have it.
+ * We need to do this before changing CSV state, in case one of
+ * these characters is also the quote or escape character.
*
- * Note: old-protocol does not like forced prefetch, but it's OK here
- * since we cannot validly be at EOF.
+ * Note: old-protocol does not like forced prefetch, but it's OK
+ * here since we cannot validly be at EOF.
*/
if (c == '\\' || c == '\r')
{
}
/*
- * Dealing with quotes and escapes here is mildly tricky. If the quote
- * char is also the escape char, there's no problem - we just use the
- * char as a toggle. If they are different, we need to ensure that we
- * only take account of an escape inside a quoted field and
- * immediately preceding a quote char, and not the second in a
- * escape-escape sequence.
+ * Dealing with quotes and escapes here is mildly tricky. If the
+ * quote char is also the escape char, there's no problem - we
+ * just use the char as a toggle. If they are different, we need
+ * to ensure that we only take account of an escape inside a
+ * quoted field and immediately preceding a quote char, and not
+ * the second in an escape-escape sequence.
*/
if (in_quote && c == escapec)
last_was_esc = !last_was_esc;
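A minimal standalone sketch of the toggle logic just described (a hypothetical helper, not backend code), assuming the quote and escape characters differ, which is the case the comment is concerned with:

#include <stdbool.h>
#include <stdio.h>

/* Count characters that fall inside quoted CSV fields. */
static int
chars_in_quotes(const char *line, char quotec, char escapec)
{
	const char *p;
	bool		in_quote = false;
	bool		last_was_esc = false;
	int			n = 0;

	for (p = line; *p; p++)
	{
		char		c = *p;

		/* an escape is only significant inside a quoted field */
		if (in_quote && c == escapec)
			last_was_esc = !last_was_esc;
		/* a quote char toggles quoting unless it was just escaped */
		if (c == quotec && !last_was_esc)
			in_quote = !in_quote;
		if (c != escapec)
			last_was_esc = false;
		if (in_quote)
			n++;
	}
	return n;
}

int
main(void)
{
	printf("%d\n", chars_in_quotes("a,\"b\\\"c\",d", '"', '\\'));	/* prints 5 */
	return 0;
}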
/*
* Updating the line count for embedded CR and/or LF chars is
- * necessarily a little fragile - this test is probably about the best
- * we can do. (XXX it's arguable whether we should do this at all ---
- * is cur_lineno a physical or logical count?)
+ * necessarily a little fragile - this test is probably about the
+ * best we can do. (XXX it's arguable whether we should do this
+ * at all --- is cur_lineno a physical or logical count?)
*/
if (in_quote && c == (cstate->eol_type == EOL_NL ? '\n' : '\r'))
cstate->cur_lineno++;
if (cstate->eol_type == EOL_CRNL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
- "unquoted carriage return found in data"),
+ "unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
+
/*
* if we got here, it is the first line and we didn't find
* \n, so don't consume the peeked character
else if (cstate->eol_type == EOL_NL)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg(!cstate->csv_mode ?
+ errmsg(!cstate->csv_mode ?
"literal carriage return found in data" :
"unquoted carriage return found in data"),
errhint(!cstate->csv_mode ?
- "Use \"\\r\" to represent carriage return." :
- "Use quoted CSV field to represent carriage return.")));
+ "Use \"\\r\" to represent carriage return." :
+ "Use quoted CSV field to represent carriage return.")));
/* If reach here, we have found the line terminator */
break;
}
"unquoted newline found in data"),
errhint(!cstate->csv_mode ?
"Use \"\\n\" to represent newline." :
- "Use quoted CSV field to represent newline.")));
+ "Use quoted CSV field to represent newline.")));
cstate->eol_type = EOL_NL; /* in case not set yet */
/* If reach here, we have found the line terminator */
break;
}
/*
- * In CSV mode, we only recognize \. alone on a line. This is
- * because \. is a valid CSV data value.
+ * In CSV mode, we only recognize \. alone on a line. This is because
+ * \. is a valid CSV data value.
*/
if (c == '\\' && (!cstate->csv_mode || first_char_in_line))
{
break;
}
else if (!cstate->csv_mode)
+
/*
- * If we are here, it means we found a backslash followed by
- * something other than a period. In non-CSV mode, anything
- * after a backslash is special, so we skip over that second
- * character too. If we didn't do that \\. would be
- * considered an eof-of copy, while in non-CVS mode it is a
- * literal backslash followed by a period. In CSV mode,
- * backslashes are not special, so we want to process the
- * character after the backslash just like a normal character,
- * so we don't increment in those cases.
+ * If we are here, it means we found a backslash followed by
+ * something other than a period. In non-CSV mode, anything
+ * after a backslash is special, so we skip over that second
+ * character too. If we didn't do that \\. would be
+ * considered an end-of-copy marker, while in non-CSV mode it is a
+ * literal backslash followed by a period. In CSV mode,
+ * backslashes are not special, so we want to process the
+ * character after the backslash just like a normal character,
+ * so we don't increment in those cases.
*/
raw_buf_ptr++;
}
/*
- * This label is for CSV cases where \. appears at the start of a line,
- * but there is more text after it, meaning it was a data value.
+ * This label is for CSV cases where \. appears at the start of a
+ * line, but there is more text after it, meaning it was a data value.
* We are more strict for \. in CSV mode because \. could be a data
* value, while in non-CSV mode, \. cannot be a data value.
*/
/*
* Process all bytes of a multi-byte character as a group.
*
- * We only support multi-byte sequences where the first byte
- * has the high-bit set, so as an optimization we can avoid
- * this block entirely if it is not set.
+ * We only support multi-byte sequences where the first byte has the
+ * high-bit set, so as an optimization we can avoid this block
+ * entirely if it is not set.
*/
if (cstate->encoding_embeds_ascii && IS_HIGHBIT_SET(c))
{
/*
* We have to grovel through the string searching for control characters
* and instances of the delimiter character. In most cases, though, these
- * are infrequent. To avoid overhead from calling CopySendData once per
- * character, we dump out all characters between replaceable characters
- * in a single call. The loop invariant is that the data from "start"
- * to "ptr" can be sent literally, but hasn't yet been.
+ * are infrequent. To avoid overhead from calling CopySendData once per
+ * character, we dump out all characters between replaceable characters in
+ * a single call. The loop invariant is that the data from "start" to
+ * "ptr" can be sent literally, but hasn't yet been.
*/
start = ptr;
while ((c = *ptr) != '\0')
{
DUMPSOFAR();
CopySendChar(cstate, '\\');
- start = ptr; /* we include char in next run */
+ start = ptr; /* we include char in next run */
}
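The run-batching idea described above can be shown with a self-contained sketch (hypothetical function, plain stdio instead of CopySendData): literal bytes between special characters are flushed in one write, and the special character itself is emitted with the next run.

#include <stdio.h>

/* Escape delim and backslash, flushing literal runs in single writes. */
static void
write_escaped(FILE *out, const char *s, char delim)
{
	const char *start = s;
	const char *ptr = s;
	char		c;

	while ((c = *ptr) != '\0')
	{
		if (c == delim || c == '\\')
		{
			/* dump everything accumulated since the last special char */
			if (ptr > start)
				fwrite(start, 1, (size_t) (ptr - start), out);
			fputc('\\', out);
			start = ptr;		/* the char itself goes out with the next run */
		}
		ptr++;
	}
	if (ptr > start)
		fwrite(start, 1, (size_t) (ptr - start), out);
}

int
main(void)
{
	write_escaped(stdout, "a|b\\c", '|');	/* prints a\|b\\c */
	fputc('\n', stdout);
	return 0;
}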
/*
if (!use_quote)
{
/*
- * Because '\.' can be a data value, quote it if it appears
- * alone on a line so it is not interpreted as the end-of-data
- * marker.
+ * Because '\.' can be a data value, quote it if it appears alone on a
+ * line so it is not interpreted as the end-of-data marker.
*/
if (single_attr && strcmp(ptr, "\\.") == 0)
- use_quote = true;
- else
- {
+ use_quote = true;
+ else
+ {
char *tptr = ptr;
while ((c = *tptr) != '\0')
{
if (rel != NULL)
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name, RelationGetRelationName(rel))));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name, RelationGetRelationName(rel))));
else
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" does not exist",
- name)));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" does not exist",
+ name)));
}
/* Check for duplicates */
if (list_member_int(attnums, attnum))
static void
copy_dest_receive(TupleTableSlot *slot, DestReceiver *self)
{
- DR_copy *myState = (DR_copy *) self;
+ DR_copy *myState = (DR_copy *) self;
CopyState cstate = myState->cstate;
/* Make sure the tuple is fully deconstructed */
DestReceiver *
CreateCopyDestReceiver(void)
{
- DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
+ DR_copy *self = (DR_copy *) palloc(sizeof(DR_copy));
self->pub.receiveSlot = copy_dest_receive;
self->pub.rStartup = copy_dest_startup;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.184 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.185 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Check for db name conflict. This is just to give a more friendly
- * error message than "unique index violation". There's a race condition
- * but we're willing to accept the less friendly message in that case.
+ * Check for db name conflict. This is just to give a more friendly error
+ * message than "unique index violation". There's a race condition but
+ * we're willing to accept the less friendly message in that case.
*/
if (OidIsValid(get_database_oid(dbname)))
ereport(ERROR,
errmsg("database \"%s\" already exists", dbname)));
/*
- * Insert a new tuple into pg_database. This establishes our ownership
- * of the new database name (anyone else trying to insert the same name
- * will block on the unique index, and fail after we commit). It also
- * assigns the OID that the new database will have.
+ * Insert a new tuple into pg_database. This establishes our ownership of
+ * the new database name (anyone else trying to insert the same name will
+ * block on the unique index, and fail after we commit). It also assigns
+ * the OID that the new database will have.
*/
pg_database_rel = heap_open(DatabaseRelationId, RowExclusiveLock);
/*
* We deliberately set datconfig and datacl to defaults (NULL), rather
- * than copying them from the template database. Copying datacl would
- * be a bad idea when the owner is not the same as the template's
- * owner. It's more debatable whether datconfig should be copied.
+ * than copying them from the template database. Copying datacl would be
+ * a bad idea when the owner is not the same as the template's owner. It's
+ * more debatable whether datconfig should be copied.
*/
new_record_nulls[Anum_pg_database_datconfig - 1] = 'n';
new_record_nulls[Anum_pg_database_datacl - 1] = 'n';
RequestCheckpoint(true, false);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_database_rel, NoLock);
errmsg("cannot drop the currently open database")));
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this to ensure that no new backend starts up in the target
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this to ensure that no new backend starts up in the target
* database while we are deleting it (see postinit.c), and that no one is
* using it as a CREATE DATABASE template or trying to delete it for
* themselves.
errmsg("cannot drop a template database")));
/*
- * Check for active backends in the target database. (Because we hold
- * the database lock, no new ones can start after this.)
+ * Check for active backends in the target database. (Because we hold the
+ * database lock, no new ones can start after this.)
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
remove_dbtablespaces(db_id);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pgdbrel, NoLock);
Relation rel;
/*
- * Look up the target database's OID, and get exclusive lock on it.
- * We need this for the same reasons as DROP DATABASE.
+ * Look up the target database's OID, and get exclusive lock on it. We
+ * need this for the same reasons as DROP DATABASE.
*/
rel = heap_open(DatabaseRelationId, RowExclusiveLock);
errmsg("current database may not be renamed")));
/*
- * Make sure the database does not have active sessions. This is the
- * same concern as above, but applied to other sessions.
+ * Make sure the database does not have active sessions. This is the same
+ * concern as above, but applied to other sessions.
*/
if (DatabaseHasActiveBackends(db_id, false))
ereport(ERROR,
CatalogUpdateIndexes(rel, newtup);
/*
- * Close pg_database, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_database, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
relation = heap_open(DatabaseRelationId, AccessShareLock);
/*
- * Loop covers the rare case where the database is renamed before we
- * can lock it. We try again just in case we can find a new one of
- * the same name.
+ * Loop covers the rare case where the database is renamed before we can
+ * lock it. We try again just in case we can find a new one of the same
+ * name.
*/
for (;;)
{
Oid dbOid;
/*
- * there's no syscache for database-indexed-by-name,
- * so must do it the hard way
+ * there's no syscache for database-indexed-by-name, so must do it the
+ * hard way
*/
ScanKeyInit(&scanKey,
Anum_pg_database_datname,
LockSharedObject(DatabaseRelationId, dbOid, 0, lockmode);
/*
- * And now, re-fetch the tuple by OID. If it's still there and
- * still the same name, we win; else, drop the lock and loop
- * back to try again.
+ * And now, re-fetch the tuple by OID. If it's still there and still
+ * the same name, we win; else, drop the lock and loop back to try
+ * again.
*/
tuple = SearchSysCache(DATABASEOID,
ObjectIdGetDatum(dbOid),
Oid oid;
/*
- * There's no syscache for pg_database indexed by name,
- * so we must look the hard way.
+ * There's no syscache for pg_database indexed by name, so we must look
+ * the hard way.
*/
pg_database = heap_open(DatabaseRelationId, AccessShareLock);
ScanKeyInit(&entry[0],
xl_dbase_create_rec *xlrec = (xl_dbase_create_rec *) rec;
appendStringInfo(buf, "create db: copy dir %u/%u to %u/%u",
- xlrec->src_db_id, xlrec->src_tablespace_id,
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->src_db_id, xlrec->src_tablespace_id,
+ xlrec->db_id, xlrec->tablespace_id);
}
else if (info == XLOG_DBASE_DROP)
{
xl_dbase_drop_rec *xlrec = (xl_dbase_drop_rec *) rec;
appendStringInfo(buf, "drop db: dir %u/%u",
- xlrec->db_id, xlrec->tablespace_id);
+ xlrec->db_id, xlrec->tablespace_id);
}
else
appendStringInfo(buf, "UNKNOWN");
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.97 2006/07/03 22:45:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.98 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
*/
if (def->arg == NULL)
return true;
+
/*
* Allow 0, 1, "true", "false"
*/
break;
default:
{
- char *sval = defGetString(def);
+ char *sval = defGetString(def);
if (pg_strcasecmp(sval, "true") == 0)
return true;
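As a rough standalone illustration of the lenient "0, 1, true, false" parsing above (parse_flag is a made-up helper, not a backend function; the backend uses pg_strcasecmp rather than strcasecmp):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>			/* strcasecmp() */

/* Accept 0/1 and case-insensitive "true"/"false"; *ok reports success. */
static bool
parse_flag(const char *sval, bool *ok)
{
	*ok = true;
	if (strcmp(sval, "1") == 0 || strcasecmp(sval, "true") == 0)
		return true;
	if (strcmp(sval, "0") == 0 || strcasecmp(sval, "false") == 0)
		return false;
	*ok = false;				/* not a recognized boolean spelling */
	return false;
}

int
main(void)
{
	bool		ok;
	bool		v = parse_flag("True", &ok);

	printf("%d %d\n", (int) ok, (int) v);	/* prints 1 1 */
	return 0;
}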
DefElem *
defWithOids(bool value)
{
- DefElem *f = makeNode(DefElem);
+ DefElem *f = makeNode(DefElem);
f->defname = "oids";
- f->arg = (Node *)makeInteger(value);
+ f->arg = (Node *) makeInteger(value);
return f;
}
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.151 2006/09/06 20:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.152 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} ExplainState;
static void ExplainOneQuery(Query *query, ExplainStmt *stmt,
- ParamListInfo params, TupOutputState *tstate);
+ ParamListInfo params, TupOutputState *tstate);
static double elapsed_time(instr_time *starttime);
static void explain_outNode(StringInfo str,
Plan *plan, PlanState *planstate,
* The tidquals list has OR semantics, so be sure to show it
* as an OR condition.
*/
- List *tidquals = ((TidScan *) plan)->tidquals;
+ List *tidquals = ((TidScan *) plan)->tidquals;
if (list_length(tidquals) > 1)
tidquals = list_make1(make_orclause(tidquals));
/*
* Ordinarily we don't pass down our own outer_plan value to our
* child nodes, but in an Append we must, since we might be
- * looking at an appendrel indexscan with outer references
- * from the member scans.
+ * looking at an appendrel indexscan with outer references from
+ * the member scans.
*/
explain_outNode(str, subnode,
appendstate->appendplans[j],
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.78 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.79 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
* Find the function, do permissions and validity checks
*/
funcOid = LookupFuncNameTypeNames(functionName, argTypes, stmt->missing_ok);
- if (!OidIsValid(funcOid))
+ if (!OidIsValid(funcOid))
{
/* can only get here if stmt->missing_ok */
ereport(NOTICE,
0, 0);
if (!HeapTupleIsValid(tuple))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("cast from type %s to type %s does not exist",
TypeNameToString(stmt->targettype))));
else
ereport(NOTICE,
- (errmsg("cast from type %s to type %s does not exist ... skipping",
+ (errmsg("cast from type %s to type %s does not exist ... skipping",
TypeNameToString(stmt->sourcetype),
TypeNameToString(stmt->targettype))));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.148 2006/08/27 19:14:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.149 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
LockRelId heaprelid;
LOCKTAG heaplocktag;
Snapshot snapshot;
- Relation pg_index;
- HeapTuple indexTuple;
+ Relation pg_index;
+ HeapTuple indexTuple;
Form_pg_index indexForm;
/*
* for an overview of how this works)
*
* We must commit our current transaction so that the index becomes
- * visible; then start another. Note that all the data structures
- * we just built are lost in the commit. The only data we keep past
- * here are the relation IDs.
+ * visible; then start another. Note that all the data structures we just
+ * built are lost in the commit. The only data we keep past here are the
+ * relation IDs.
*
* Before committing, get a session-level lock on the table, to ensure
- * that neither it nor the index can be dropped before we finish.
- * This cannot block, even if someone else is waiting for access, because
- * we already have the same lock within our transaction.
+ * that neither it nor the index can be dropped before we finish. This
+ * cannot block, even if someone else is waiting for access, because we
+ * already have the same lock within our transaction.
*
* Note: we don't currently bother with a session lock on the index,
- * because there are no operations that could change its state while
- * we hold lock on the parent table. This might need to change later.
+ * because there are no operations that could change its state while we
+ * hold lock on the parent table. This might need to change later.
*/
heaprelid = rel->rd_lockInfo.lockRelId;
LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
/*
* Now we must wait until no running transaction could have the table open
- * with the old list of indexes. To do this, inquire which xacts currently
- * would conflict with ShareLock on the table -- ie, which ones have
- * a lock that permits writing the table. Then wait for each of these
- * xacts to commit or abort. Note we do not need to worry about xacts
- * that open the table for writing after this point; they will see the
- * new index when they open it.
+ * with the old list of indexes. To do this, inquire which xacts
+ * currently would conflict with ShareLock on the table -- ie, which ones
+ * have a lock that permits writing the table. Then wait for each of
+ * these xacts to commit or abort. Note we do not need to worry about
+ * xacts that open the table for writing after this point; they will see
+ * the new index when they open it.
*
- * Note: GetLockConflicts() never reports our own xid,
- * hence we need not check for that.
+ * Note: GetLockConflicts() never reports our own xid, hence we need not
+ * check for that.
*/
SET_LOCKTAG_RELATION(heaplocktag, heaprelid.dbId, heaprelid.relId);
old_xact_list = GetLockConflicts(&heaplocktag, ShareLock);
/*
* Now take the "reference snapshot" that will be used by validate_index()
- * to filter candidate tuples. All other transactions running at this
+ * to filter candidate tuples. All other transactions running at this
* time will have to be out-waited before we can commit, because we can't
* guarantee that tuples deleted just before this will be in the index.
*
- * We also set ActiveSnapshot to this snap, since functions in indexes
- * may need a snapshot.
+ * We also set ActiveSnapshot to this snap, since functions in indexes may
+ * need a snapshot.
*/
snapshot = CopySnapshot(GetTransactionSnapshot());
ActiveSnapshot = snapshot;
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted
- * just before the reference snap was taken, we have to wait out any
- * transactions older than the reference snap. We can do this by
- * waiting for each xact explicitly listed in the snap.
+ * interesting tuples. But since it might not contain tuples deleted just
+ * before the reference snap was taken, we have to wait out any
+ * transactions older than the reference snap. We can do this by waiting
+ * for each xact explicitly listed in the snap.
*
- * Note: GetSnapshotData() never stores our own xid into a snap,
- * hence we need not check for that.
+ * Note: GetSnapshotData() never stores our own xid into a snap, hence we
+ * need not check for that.
*/
for (ixcnt = 0; ixcnt < snapshot->xcnt; ixcnt++)
XactLockTableWait(snapshot->xip[ixcnt]);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.48 2006/07/18 17:42:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.49 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* A minimum expectation therefore is that the caller have execute
* privilege with grant option. Since we don't have a way to make the
* opclass go away if the grant option is revoked, we choose instead to
- * require ownership of the functions. It's also not entirely clear what
+ * require ownership of the functions. It's also not entirely clear what
* permissions should be required on the datatype, but ownership seems
* like a safe choice.
*
opcID = OpclassnameGetOpcid(amID, opcname);
if (!OidIsValid(opcID))
{
- if (! stmt -> missing_ok )
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
opcname, stmt->amname)));
-
+
return;
}
-
+
tuple = SearchSysCache(CLAOID,
ObjectIdGetDatum(opcID),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
{
-
- if (! stmt->missing_ok )
+
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
else
ereport(NOTICE,
(errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(stmt->opclassname), stmt->amname)));
+ NameListToString(stmt->opclassname), stmt->amname)));
return;
}
-
+
opcID = HeapTupleGetOid(tuple);
/* Permission check: must own opclass or its namespace */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.32 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.33 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
operOid = LookupOperNameTypeNames(NULL, operatorName,
typeName1, typeName2,
stmt->missing_ok, -1);
-
- if (stmt->missing_ok &&!OidIsValid(operOid) )
- {
- ereport(NOTICE,
- (errmsg("operator %s does not exist, skipping",
- NameListToString(operatorName))));
- return;
- }
+
+ if (stmt->missing_ok && !OidIsValid(operOid))
+ {
+ ereport(NOTICE,
+ (errmsg("operator %s does not exist, skipping",
+ NameListToString(operatorName))));
+ return;
+ }
tup = SearchSysCache(OPEROID,
ObjectIdGetDatum(operOid),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.55 2006/09/07 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.56 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
plan = copyObject(plan);
/*
- * XXX: debug_query_string is wrong here: the user might have
- * submitted multiple semicolon delimited queries.
+ * XXX: debug_query_string is wrong here: the user might have submitted
+ * multiple semicolon delimited queries.
*/
PortalDefineQuery(portal,
NULL,
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.65 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.66 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
portal = CreateNewPortal();
/* Don't display the portal in pg_cursors, it is for internal use only */
portal->visible = false;
-
+
/*
* For CREATE TABLE / AS EXECUTE, make a copy of the stored query so that
* we can modify its destination (yech, but this has always been ugly).
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
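The allocation above relies on the struct declaring a one-element trailing array, so the struct size already covers the first entry and only nargs - 1 more need to be added. A self-contained sketch of the same idiom, with made-up type names rather than the backend's ParamListInfoData:

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
	int			value;
} DemoParam;

typedef struct
{
	int			nparams;
	DemoParam	params[1];		/* variable length in practice */
} DemoParamList;

int
main(void)
{
	int			nargs = 4;
	int			i;
	DemoParamList *list;

	/* sizeof(DemoParamList) already covers params[0], so add nargs - 1 more */
	list = (DemoParamList *) malloc(sizeof(DemoParamList) +
									(nargs - 1) * sizeof(DemoParam));
	list->nparams = nargs;
	for (i = 0; i < nargs; i++)
		list->params[i].value = i * i;
	printf("%d\n", list->params[3].value);	/* prints 9 */
	free(list);
	return 0;
}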
forboth(le, exprstates, la, argtypes)
Datum
pg_prepared_statement(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
- PreparedStatement *prep_stmt;
+ FuncCallContext *funcctx;
+ HASH_SEQ_STATUS *hash_seq;
+ PreparedStatement *prep_stmt;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
- MemoryContext oldcontext;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx->user_fctx = NULL;
/*
- * build tupdesc for result tuples. This must match the
- * definition of the pg_prepared_statements view in
- * system_views.sql
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_prepared_statements view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(5, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
prep_stmt = hash_seq_search(hash_seq);
if (prep_stmt)
{
- Datum result;
- HeapTuple tuple;
- Datum values[5];
- bool nulls[5];
+ Datum result;
+ HeapTuple tuple;
+ Datum values[5];
+ bool nulls[5];
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->stmt_name));
+ CStringGetDatum(prep_stmt->stmt_name));
if (prep_stmt->query_string == NULL)
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(prep_stmt->query_string));
+ CStringGetDatum(prep_stmt->query_string));
values[2] = TimestampTzGetDatum(prep_stmt->prepare_time);
values[3] = build_regtype_array(prep_stmt->argtype_list);
i = 0;
foreach(lc, oid_list)
{
- Oid oid;
- Datum oid_str;
+ Oid oid;
+ Datum oid_str;
oid = lfirst_oid(lc);
oid_str = DirectFunctionCall1(oidout, ObjectIdGetDatum(oid));
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.68 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.69 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
0, 0, 0);
if (!HeapTupleIsValid(langTup))
{
- if (! stmt->missing_ok)
+ if (!stmt->missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName)));
- else
+ else
ereport(NOTICE,
- (errmsg("language \"%s\" does not exist, skipping",
+ (errmsg("language \"%s\" does not exist, skipping",
languageName)));
-
+
return;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.139 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.140 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel);
static Form_pg_sequence read_info(SeqTable elm, Relation rel, Buffer *buf);
static void init_params(List *options, bool isInit,
- Form_pg_sequence new, List **owned_by);
+ Form_pg_sequence new, List **owned_by);
static void do_setval(Oid relid, int64 next, bool iscalled);
static void process_owned_by(Relation seqrel, List *owned_by);
static void
init_sequence(Oid relid, SeqTable *p_elm, Relation *p_rel)
{
- SeqTable elm;
+ SeqTable elm;
Relation seqrel;
/* Look to see if we already have a seqtable entry for relation */
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("invalid OWNED BY option"),
- errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
+ errhint("Specify OWNED BY table.column or OWNED BY NONE.")));
tablerel = NULL;
attnum = 0;
}
if (seqrel->rd_rel->relowner != tablerel->rd_rel->relowner)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("sequence must have same owner as table it is owned by")));
+ errmsg("sequence must have same owner as table it is owned by")));
if (RelationGetNamespace(seqrel) != RelationGetNamespace(tablerel))
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
}
/*
- * OK, we are ready to update pg_depend. First remove any existing
- * AUTO dependencies for the sequence, then optionally add a new one.
+ * OK, we are ready to update pg_depend. First remove any existing AUTO
+ * dependencies for the sequence, then optionally add a new one.
*/
markSequenceUnowned(RelationGetRelid(seqrel));
}
appendStringInfo(buf, "rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.202 2006/09/04 21:15:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.203 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool change_varattnos_walker(Node *node, const AttrNumber *newattno);
static void StoreCatalogInheritance(Oid relationId, List *supers);
static void StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation catalogRelation);
+ int16 seqNumber, Relation catalogRelation);
static int findAttrByName(const char *attributeName, List *schema);
static void setRelhassubclassInRelation(Oid relationId, bool relhassubclass);
static void AlterIndexNamespaces(Relation classRel, Relation rel,
}
/*
- * In CASCADE mode, suck in all referencing relations as well. This
- * requires multiple iterations to find indirectly-dependent relations.
- * At each phase, we need to exclusive-lock new rels before looking
- * for their dependencies, else we might miss something. Also, we
- * check each rel as soon as we open it, to avoid a faux pas such as
- * holding lock for a long time on a rel we have no permissions for.
+ * In CASCADE mode, suck in all referencing relations as well. This
+ * requires multiple iterations to find indirectly-dependent relations. At
+ * each phase, we need to exclusive-lock new rels before looking for their
+ * dependencies, else we might miss something. Also, we check each rel as
+ * soon as we open it, to avoid a faux pas such as holding lock for a long
+ * time on a rel we have no permissions for.
*/
if (stmt->behavior == DROP_CASCADE)
{
for (;;)
{
- List *newrelids;
+ List *newrelids;
newrelids = heap_truncate_find_FKs(relids);
if (newrelids == NIL)
foreach(cell, newrelids)
{
- Oid relid = lfirst_oid(cell);
+ Oid relid = lfirst_oid(cell);
Relation rel;
rel = heap_open(relid, AccessExclusiveLock);
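The CASCADE comment above describes a fixed-point loop: keep asking for newly referencing relations and stop once a pass adds nothing. A standalone sketch of that pattern, with a made-up boolean reference matrix standing in for heap_truncate_find_FKs() and integers standing in for relation OIDs:

#include <stdbool.h>
#include <stdio.h>

#define NREL 5

/* refs[i][j]: relation j has a foreign key referencing relation i (made-up data) */
static const bool refs[NREL][NREL] = {
	{false, true, false, false, false},
	{false, false, true, true, false},
	{false, false, false, false, false},
	{false, false, false, false, true},
	{false, false, false, false, false},
};

int
main(void)
{
	bool		included[NREL] = {true, false, false, false, false};
	bool		progress = true;
	int			i,
				j;

	/* keep pulling in referencing relations until a pass adds nothing new */
	while (progress)
	{
		progress = false;
		for (i = 0; i < NREL; i++)
		{
			if (!included[i])
				continue;
			for (j = 0; j < NREL; j++)
			{
				if (refs[i][j] && !included[j])
				{
					included[j] = true;	/* lock + permission check would go here */
					progress = true;
				}
			}
		}
	}
	for (i = 0; i < NREL; i++)
		if (included[i])
			printf("%d ", i);	/* prints: 0 1 2 3 4 */
	printf("\n");
	return 0;
}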
/*
* Check foreign key references. In CASCADE mode, this should be
- * unnecessary since we just pulled in all the references; but as
- * a cross-check, do it anyway if in an Assert-enabled build.
+ * unnecessary since we just pulled in all the references; but as a
+ * cross-check, do it anyway if in an Assert-enabled build.
*/
#ifdef USE_ASSERT_CHECKING
heap_truncate_check_FKs(rels, false);
#endif
/*
- * Also check for pending AFTER trigger events on the target relations.
- * We can't just leave those be, since they will try to fetch tuples
- * that the TRUNCATE removes.
+ * Also check for pending AFTER trigger events on the target relations. We
+ * can't just leave those be, since they will try to fetch tuples that the
+ * TRUNCATE removes.
*/
AfterTriggerCheckTruncate(relids);
}
/*
- * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
+ * Check that a given rel is safe to truncate. Subroutine for ExecuteTruncate
*/
static void
truncate_check_rel(Relation rel)
RelationGetRelationName(rel))));
/*
- * We can never allow truncation of shared or nailed-in-cache
- * relations, because we can't support changing their relfilenode
- * values.
+ * We can never allow truncation of shared or nailed-in-cache relations,
+ * because we can't support changing their relfilenode values.
*/
if (rel->rd_rel->relisshared || rel->rd_isnailed)
ereport(ERROR,
RelationGetRelationName(rel))));
/*
- * Don't allow truncate on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow truncate on temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
}
/*----------
AttrNumber *
varattnos_map(TupleDesc old, TupleDesc new)
{
- int i,j;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i,
+ j;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- for (j=1; j<= new->natts; j++)
- if (!strcmp(NameStr(old->attrs[i-1]->attname), NameStr(new->attrs[j-1]->attname)))
- attmap[i-1] = j;
+ for (j = 1; j <= new->natts; j++)
+ if (!strcmp(NameStr(old->attrs[i - 1]->attname), NameStr(new->attrs[j - 1]->attname)))
+ attmap[i - 1] = j;
}
return attmap;
}
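The by-name mapping that varattnos_map builds can be sketched in plain C (hypothetical helper; C strings stand in for the tuple descriptors, NULL stands in for attisdropped): attmap[i-1] holds the 1-based position in the new column list of old column i, or 0 if dropped or unmatched.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int *
map_by_name(const char **oldnames, int nold, const char **newnames, int nnew)
{
	int		   *attmap = (int *) calloc((size_t) nold, sizeof(int));
	int			i,
				j;

	for (i = 1; i <= nold; i++)
	{
		if (oldnames[i - 1] == NULL)	/* stands in for attisdropped */
			continue;					/* attmap[i-1] stays 0 */
		for (j = 1; j <= nnew; j++)
			if (strcmp(oldnames[i - 1], newnames[j - 1]) == 0)
				attmap[i - 1] = j;
	}
	return attmap;
}

int
main(void)
{
	const char *oldcols[] = {"id", NULL, "name"};
	const char *newcols[] = {"name", "id"};
	int		   *map = map_by_name(oldcols, 3, newcols, 2);

	printf("%d %d %d\n", map[0], map[1], map[2]);	/* prints 2 0 1 */
	free(map);
	return 0;
}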
* ColumnDefs
*/
AttrNumber *
-varattnos_map_schema(TupleDesc old, List *schema)
+varattnos_map_schema(TupleDesc old, List *schema)
{
- int i;
- AttrNumber *attmap = palloc0(sizeof(AttrNumber)*old->natts);
- for (i=1; i <= old->natts; i++) {
- if (old->attrs[i-1]->attisdropped) {
- attmap[i-1] = 0;
+ int i;
+ AttrNumber *attmap = palloc0(sizeof(AttrNumber) * old->natts);
+
+ for (i = 1; i <= old->natts; i++)
+ {
+ if (old->attrs[i - 1]->attisdropped)
+ {
+ attmap[i - 1] = 0;
continue;
}
- attmap[i-1] = findAttrByName(NameStr(old->attrs[i-1]->attname), schema);
+ attmap[i - 1] = findAttrByName(NameStr(old->attrs[i - 1]->attname), schema);
}
return attmap;
}
static void
StoreCatalogInheritance1(Oid relationId, Oid parentOid,
- int16 seqNumber, Relation relation)
+ int16 seqNumber, Relation relation)
{
- Datum datum[Natts_pg_inherits];
- char nullarr[Natts_pg_inherits];
- ObjectAddress childobject,
- parentobject;
- HeapTuple tuple;
- TupleDesc desc = RelationGetDescr(relation);
+ Datum datum[Natts_pg_inherits];
+ char nullarr[Natts_pg_inherits];
+ ObjectAddress childobject,
+ parentobject;
+ HeapTuple tuple;
+ TupleDesc desc = RelationGetDescr(relation);
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
ATPrepSetTableSpace(tab, rel, cmd->name);
pass = AT_PASS_MISC; /* doesn't actually matter */
break;
- case AT_SetRelOptions: /* SET (...) */
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_SetRelOptions: /* SET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATSimplePermissionsRelationOrIndex(rel);
/* This command never recurses */
/* No command-specific prep needed */
* Nothing to do here; Phase 3 does the work
*/
break;
- case AT_SetRelOptions: /* SET (...) */
+ case AT_SetRelOptions: /* SET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, false);
break;
- case AT_ResetRelOptions: /* RESET (...) */
+ case AT_ResetRelOptions: /* RESET (...) */
ATExecSetRelOptions(rel, (List *) cmd->def, true);
break;
case AT_EnableTrig: /* ENABLE TRIGGER name */
/*
* If we are rebuilding the tuples OR if we added any new NOT NULL
* constraints, check all not-null constraints. This is a bit of
- * overkill but it minimizes risk of bugs, and heap_attisnull is
- * a pretty cheap test anyway.
+ * overkill but it minimizes risk of bugs, and heap_attisnull is a
+ * pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
foreach(l, notnull_attrs)
{
- int attn = lfirst_int(l);
+ int attn = lfirst_int(l);
- if (heap_attisnull(tuple, attn+1))
+ if (heap_attisnull(tuple, attn + 1))
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
- NameStr(newTupDesc->attrs[attn]->attname))));
+ NameStr(newTupDesc->attrs[attn]->attname))));
}
foreach(l, tab->constraints)
if (!list_member_oid(tab->changedConstraintOids,
foundObject.objectId))
{
- char *defstring = pg_get_constraintdef_string(foundObject.objectId);
+ char *defstring = pg_get_constraintdef_string(foundObject.objectId);
/*
* Put NORMAL dependencies at the front of the list and
/*
* Now we can drop the existing constraints and indexes --- constraints
* first, since some of them might depend on the indexes. In fact, we
- * have to delete FOREIGN KEY constraints before UNIQUE constraints,
- * but we already ordered the constraint list to ensure that would happen.
- * It should be okay to use DROP_RESTRICT here, since nothing else should
- * be depending on these objects.
+ * have to delete FOREIGN KEY constraints before UNIQUE constraints, but
+ * we already ordered the constraint list to ensure that would happen. It
+ * should be okay to use DROP_RESTRICT here, since nothing else should be
+ * depending on these objects.
*/
foreach(l, tab->changedConstraintOids)
{
tuple_class->relowner != newOwnerId)
{
/* if it's an owned sequence, disallow changing it by itself */
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relationOid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot change owner of sequence \"%s\"",
NameStr(tuple_class->relname)),
- errdetail("Sequence \"%s\" is linked to table \"%s\".",
- NameStr(tuple_class->relname),
- get_rel_name(tableId))));
+ errdetail("Sequence \"%s\" is linked to table \"%s\".",
+ NameStr(tuple_class->relname),
+ get_rel_name(tableId))));
}
break;
case RELKIND_TOASTVALUE:
}
static char *
-decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
+decompile_conbin(HeapTuple contup, TupleDesc tupdesc)
{
- Form_pg_constraint con;
- bool isnull;
- Datum attr;
- Datum expr;
+ Form_pg_constraint con;
+ bool isnull;
+ Datum attr;
+ Datum expr;
con = (Form_pg_constraint) GETSTRUCT(contup);
attr = heap_getattr(contup, Anum_pg_constraint_conbin, tupdesc, &isnull);
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("table \"%s\" without OIDs cannot inherit from table \"%s\" with OIDs",
- RelationGetRelationName(child_rel), parent->relname)));
+ RelationGetRelationName(child_rel), parent->relname)));
/*
* Don't allow any duplicates in the list of parents. We scan through the
heap_close(catalogRelation, RowExclusiveLock);
/*
- * If the new parent is found in our list of inheritors, we have a circular
- * structure
+ * If the new parent is found in our list of inheritors, we have a
+ * circular structure
*/
children = find_all_inheritors(RelationGetRelid(child_rel));
static void
MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
{
- Relation attrdesc;
- AttrNumber parent_attno;
- int parent_natts;
- TupleDesc tupleDesc;
+ Relation attrdesc;
+ AttrNumber parent_attno;
+ int parent_natts;
+ TupleDesc tupleDesc;
TupleConstr *constr;
- HeapTuple tuple;
+ HeapTuple tuple;
tupleDesc = RelationGetDescr(parent_rel);
parent_natts = tupleDesc->natts;
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(child_rel), NameStr(attribute->attname))));
+ RelationGetRelationName(child_rel), NameStr(attribute->attname))));
if (attribute->attnotnull && !childatt->attnotnull)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" in child table must be NOT NULL",
- NameStr(attribute->attname))));
+ errmsg("column \"%s\" in child table must be NOT NULL",
+ NameStr(attribute->attname))));
childatt->attinhcount++;
simple_heap_update(attrdesc, &tuple->t_self, tuple);
/* if it's an owned sequence, disallow moving it by itself */
if (rel->rd_rel->relkind == RELKIND_SEQUENCE)
{
- Oid tableId;
- int32 colId;
+ Oid tableId;
+ int32 colId;
if (sequenceIsOwned(relid, &tableId, &colId))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move an owned sequence into another schema"),
+ errmsg("cannot move an owned sequence into another schema"),
errdetail("Sequence \"%s\" is linked to table \"%s\".",
RelationGetRelationName(rel),
get_rel_name(tableId))));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.38 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.39 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!HeapTupleIsValid(tuple))
{
- if ( ! stmt->missing_ok )
+ if (!stmt->missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
}
/*
- * Note: because we checked that the tablespace was empty, there should
- * be no need to worry about flushing shared buffers or free space map
+ * Note: because we checked that the tablespace was empty, there should be
+ * no need to worry about flushing shared buffers or free space map
* entries for relations in the tablespace.
*/
xl_tblspc_create_rec *xlrec = (xl_tblspc_create_rec *) rec;
appendStringInfo(buf, "create ts: %u \"%s\"",
- xlrec->ts_id, xlrec->ts_path);
+ xlrec->ts_id, xlrec->ts_path);
}
else if (info == XLOG_TBLSPC_DROP)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.208 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.209 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!HeapTupleIsValid(tup))
{
- if (! missing_ok)
+ if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("trigger \"%s\" for table \"%s\" does not exist",
/*
* Note: since we scan the triggers using TriggerRelidNameIndexId, we will
* be reading the triggers in name order, except possibly during
- * emergency-recovery operations (ie, IgnoreSystemIndexes). This in
- * turn ensures that triggers will be fired in name order.
+ * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
+ * ensures that triggers will be fired in name order.
*/
ScanKeyInit(&skey,
Anum_pg_trigger_tgrelid,
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- constraint->catalogname, constraint->schemaname,
+ constraint->catalogname, constraint->schemaname,
constraint->relname)));
}
- /*
+ /*
* If we're given the schema name with the constraint, look only
- * in that schema. If given a bare constraint name, use the
+ * in that schema. If given a bare constraint name, use the
* search path to find the first matching constraint.
*/
- if (constraint->schemaname) {
- Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+ if (constraint->schemaname)
+ {
+ Oid namespaceId = LookupExplicitNamespace(constraint->schemaname);
+
namespaceSearchList = list_make1_oid(namespaceId);
- } else {
+ }
+ else
+ {
namespaceSearchList = fetch_search_path(true);
}
found = false;
foreach(namespaceSearchCell, namespaceSearchList)
{
- Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
+ Oid searchNamespaceId = lfirst_oid(namespaceSearchCell);
/*
* Setup to scan pg_trigger by tgconstrname ...
while (HeapTupleIsValid(htup = systable_getnext(tgscan)))
{
Form_pg_trigger pg_trigger = (Form_pg_trigger) GETSTRUCT(htup);
- Oid constraintNamespaceId;
+ Oid constraintNamespaceId;
/*
* Foreign key constraints have triggers on both the
- * parent and child tables. Since these tables may be
- * in different schemas we must pick the child table
- * because that table "owns" the constraint.
+ * parent and child tables. Since these tables may be in
+ * different schemas we must pick the child table because
+ * that table "owns" the constraint.
*
* Referential triggers on the parent table other than
- * NOACTION_DEL and NOACTION_UPD are ignored below, so
- * it is possible to not check them here, but it seems
- * safer to always check.
+ * NOACTION_DEL and NOACTION_UPD are ignored below, so it
+ * is possible to not check them here, but it seems safer
+ * to always check.
*/
if (pg_trigger->tgfoid == F_RI_FKEY_NOACTION_DEL ||
pg_trigger->tgfoid == F_RI_FKEY_NOACTION_UPD ||
constraintNamespaceId = get_rel_namespace(pg_trigger->tgrelid);
/*
- * If this constraint is not in the schema we're
- * currently searching for, keep looking.
+ * If this constraint is not in the schema we're currently
+ * searching for, keep looking.
*/
if (constraintNamespaceId != searchNamespaceId)
continue;
/*
- * If we found some, check that they fit the deferrability but
- * skip referential action ones, since they are silently never
- * deferrable.
+ * If we found some, check that they fit the deferrability
+ * but skip referential action ones, since they are
+ * silently never deferrable.
*/
if (pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_UPD &&
pg_trigger->tgfoid != F_RI_FKEY_RESTRICT_DEL &&
if (stmt->deferred && !pg_trigger->tgdeferrable)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("constraint \"%s\" is not deferrable",
- constraint->relname)));
+ errmsg("constraint \"%s\" is not deferrable",
+ constraint->relname)));
oidlist = lappend_oid(oidlist, HeapTupleGetOid(htup));
}
found = true;
event = event->ate_next)
{
/*
- * We can ignore completed events. (Even if a DONE flag is rolled
- * back by subxact abort, it's OK because the effects of the
- * TRUNCATE must get rolled back too.)
+ * We can ignore completed events. (Even if a DONE flag is rolled
+ * back by subxact abort, it's OK because the effects of the TRUNCATE
+ * must get rolled back too.)
*/
if (event->ate_event & AFTER_TRIGGER_DONE)
continue;
}
/*
- * Also scan events queued by incomplete queries. This could only
- * matter if a TRUNCATE is executed by a function or trigger within
- * an updating query on the same relation, which is pretty perverse,
- * but let's check.
+ * Also scan events queued by incomplete queries. This could only matter
+ * if a TRUNCATE is executed by a function or trigger within an updating
+ * query on the same relation, which is pretty perverse, but let's check.
*/
for (depth = 0; depth <= afterTriggers->query_depth; depth++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.96 2006/07/31 20:09:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.97 2006/10/04 00:29:51 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
/*
- * Check permissions on functions. We choose to require the creator/owner
- * of a type to also own the underlying functions. Since creating a type
+ * Check permissions on functions. We choose to require the creator/owner
+ * of a type to also own the underlying functions. Since creating a type
* is tantamount to granting public execute access on the functions, the
- * minimum sane check would be for execute-with-grant-option. But we don't
- * have a way to make the type go away if the grant option is revoked, so
- * ownership seems better.
+ * minimum sane check would be for execute-with-grant-option. But we
+ * don't have a way to make the type go away if the grant option is
+ * revoked, so ownership seems better.
*/
if (inputOid && !pg_proc_ownercheck(inputOid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_PROC,
/*
* Base type must be a plain base type or another domain. Domains over
- * pseudotypes would create a security hole. Domains over composite
- * types might be made to work in the future, but not today.
+ * pseudotypes would create a security hole. Domains over composite types
+ * might be made to work in the future, but not today.
*/
typtype = baseType->typtype;
if (typtype != 'b' && typtype != 'd')
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in check constraint")));
+ errmsg("cannot use aggregate function in check constraint")));
/*
* Convert to string form for storage.
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.173 2006/07/13 16:49:14 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.174 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
GetUserId(), false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
false);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
else
{
ereport(NOTICE,
- (errmsg("role \"%s\" does not exist, skipping",
+ (errmsg("role \"%s\" does not exist, skipping",
role)));
}
ReleaseSysCache(oldtuple);
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(rel, NoLock);
}
/*
- * Close pg_authid, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authid, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authid_rel, NoLock);
* Drop the objects owned by a given list of roles.
*/
void
-DropOwnedObjects(DropOwnedStmt * stmt)
+DropOwnedObjects(DropOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
* Give the objects owned by a given list of roles away to another user.
*/
void
-ReassignOwnedObjects(ReassignOwnedStmt * stmt)
+ReassignOwnedObjects(ReassignOwnedStmt *stmt)
{
List *role_ids = roleNamesToIds(stmt->roles);
ListCell *cell;
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
}
/*
- * Close pg_authmem, but keep lock till commit (this is important
- * to prevent any risk of deadlock failure while updating flat file)
+ * Close pg_authmem, but keep lock till commit (this is important to
+ * prevent any risk of deadlock failure while updating flat file)
*/
heap_close(pg_authmem_rel, NoLock);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.340 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.341 2006/10/04 00:29:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Size min_tlen;
Size max_tlen;
bool hasindex;
- TransactionId minxid; /* Minimum Xid present anywhere on table */
+ TransactionId minxid; /* Minimum Xid present anywhere on table */
/* vtlinks array for tuple chain following - sorted by new_tid */
int num_vtlinks;
VTupleLink vtlinks;
static int vac_cmp_offno(const void *left, const void *right);
static int vac_cmp_vtlinks(const void *left, const void *right);
static bool enough_space(VacPage vacpage, Size len);
-static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
+static Size PageGetFreeSpaceWithFillFactor(Relation relation, Page page);
/****************************************************************************
errhint("Use VACUUM FULL, then VACUUM FREEZE.")));
/*
- * Send info about dead objects to the statistics collector, unless
- * we are in autovacuum --- autovacuum.c does this for itself.
+ * Send info about dead objects to the statistics collector, unless we are
+ * in autovacuum --- autovacuum.c does this for itself.
*/
if (vacstmt->vacuum && !IsAutoVacuumProcess())
pgstat_vacuum_tabstat();
* PostgresMain().
*/
StartTransactionCommand();
+
/*
- * Re-establish the transaction snapshot. This is wasted effort
- * when we are called as a normal utility command, because the
- * new transaction will be dropped immediately by PostgresMain();
- * but it's necessary if we are called from autovacuum because
- * autovacuum might continue on to do an ANALYZE-only call.
+ * Re-establish the transaction snapshot. This is wasted effort when
+ * we are called as a normal utility command, because the new
+ * transaction will be dropped immediately by PostgresMain(); but it's
+ * necessary if we are called from autovacuum because autovacuum might
+ * continue on to do an ANALYZE-only call.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
}
if (vacstmt->vacuum)
{
- TransactionId minxid,
- vacuumxid;
+ TransactionId minxid,
+ vacuumxid;
/*
* If it was a database-wide VACUUM, print FSM usage statistics (we
TransactionId limit;
/*
- * We can always ignore processes running lazy vacuum. This is because we
+ * We can always ignore processes running lazy vacuum. This is because we
* use these values only for deciding which tuples we must keep in the
- * tables. Since lazy vacuum doesn't write its xid to the table, it's
+ * tables. Since lazy vacuum doesn't write its xid to the table, it's
* safe to ignore it. In theory it could be problematic to ignore lazy
* vacuums on a full vacuum, but keep in mind that only one vacuum process
* can be working on a particular table at any time, and that each vacuum
pgcform->relhasindex = hasindex;
dirty = true;
}
+
/*
* If we have discovered that there are no indexes, then there's no
* primary key either. This could be done more thoroughly...
/*
* vac_update_dbminxid() -- update the minimum Xid present in one database
*
- * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
- * of it. datminxid is updated to the minimum of all relminxid found in
- * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
- * found in pg_class. The values are also returned in minxid and
- * vacuumxid, respectively.
+ * Update pg_database's datminxid and datvacuumxid, and the flat-file copy
+ * of it. datminxid is updated to the minimum of all relminxid found in
+ * pg_class. datvacuumxid is updated to the minimum of all relvacuumxid
+ * found in pg_class. The values are also returned in minxid and
+ * vacuumxid, respectively.
*
* We violate transaction semantics here by overwriting the database's
* existing pg_database tuple with the new values. This is reasonably
HeapTuple tuple;
Form_pg_database dbform;
Relation relation;
- SysScanDesc scan;
+ SysScanDesc scan;
HeapTuple classTup;
- TransactionId newMinXid = InvalidTransactionId;
- TransactionId newVacXid = InvalidTransactionId;
+ TransactionId newMinXid = InvalidTransactionId;
+ TransactionId newVacXid = InvalidTransactionId;
bool dirty = false;
- /*
- * We must seqscan pg_class to find the minimum Xid, because there
- * is no index that can help us here.
+ /*
+ * We must seqscan pg_class to find the minimum Xid, because there is no
+ * index that can help us here.
*/
relation = heap_open(RelationRelationId, AccessShareLock);
*vacuumxid = newVacXid;
/* Mark the flat-file copy of pg_database for update at commit */
- database_file_update_needed();
+ database_file_update_needed();
}
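
The vac_update_dbminxid hunk above folds every relminxid/relvacuumxid found while seqscanning pg_class into a database-wide minimum. Because transaction IDs wrap around, "minimum" here means the oldest xid under circular comparison. The following standalone sketch keeps such a running minimum; xid_precedes and fold_min_xid are illustrative names, not backend functions, and the modulo-2^32 comparison is an assumption about the general technique, not a claim about the backend's exact code.

#include <inttypes.h>
#include <stdio.h>

typedef uint32_t Xid;

#define INVALID_XID ((Xid) 0)

/*
 * Wraparound-aware ordering: a precedes b when the circular distance from
 * b to a is negative.  Converting an out-of-range value to int32_t is
 * implementation-defined in ISO C, but wraps on the usual two's-complement
 * platforms; this is a sketch of the idea, not production code.
 */
static int
xid_precedes(Xid a, Xid b)
{
	int32_t		diff = (int32_t) (a - b);

	return diff < 0;
}

/* Fold one per-relation xid into the running database-wide minimum. */
static Xid
fold_min_xid(Xid current_min, Xid rel_xid)
{
	if (rel_xid == INVALID_XID)
		return current_min;		/* nothing recorded for this relation */
	if (current_min == INVALID_XID || xid_precedes(rel_xid, current_min))
		return rel_xid;
	return current_min;
}

int
main(void)
{
	/* pretend these came from successive pg_class rows; 4294967290 sits
	 * just before the wraparound point and so counts as the oldest */
	Xid			relminxid[] = {1200, 4294967290u, 10, 1100};
	Xid			minxid = INVALID_XID;

	for (int i = 0; i < 4; i++)
		minxid = fold_min_xid(minxid, relminxid[i]);

	printf("database-wide minimum xid: %" PRIu32 "\n", minxid);
	return 0;
}
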
* XXX -- the test we use here is fairly arbitrary. Note that in the
* autovacuum database-wide code, a template database is always processed
* with VACUUM FREEZE, so we can be sure that it will be truly frozen so
- * it won't be need to be processed here again soon.
 * it won't need to be processed here again soon.
*
* FIXME -- here we could get into a kind of loop if the database being
* chosen is not actually a template database, because we'll not freeze
* it, so its age may not really decrease if there are any live
* non-freezable tuples. Consider forcing a vacuum freeze if autovacuum
- * is invoked by a backend. On the other hand, forcing a vacuum freeze
 - * on a user database may not be a very polite thing to do.
 + * is invoked by a backend. On the other hand, forcing a vacuum freeze on
 + * a user database may not be a very polite thing to do.
*/
if (!AutoVacuumingActive() && age > (int32) ((MaxTransactionId >> 3) * 3))
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC);
else
{
/*
- * During a lazy VACUUM we do not run any user-supplied functions,
- * and so it should be safe to not create a transaction snapshot.
+ * During a lazy VACUUM we do not run any user-supplied functions, and
+ * so it should be safe to not create a transaction snapshot.
*
* We can furthermore set the inVacuum flag, which lets other
* concurrent VACUUMs know that they can ignore this one while
* determining their OldestXmin. (The reason we don't set inVacuum
* during a full VACUUM is exactly that we may have to run user-
- * defined functions for functional indexes, and we want to make
- * sure that if they use the snapshot set above, any tuples it
- * requires can't get removed from other tables. An index function
- * that depends on the contents of other tables is arguably broken,
- * but we won't break it here by violating transaction semantics.)
+ * defined functions for functional indexes, and we want to make sure
+ * that if they use the snapshot set above, any tuples it requires
+ * can't get removed from other tables. An index function that
+ * depends on the contents of other tables is arguably broken, but we
+ * won't break it here by violating transaction semantics.)
*
* Note: the inVacuum flag remains set until CommitTransaction or
* AbortTransaction. We don't want to clear it until we reset
/*
* Open the relation and get the appropriate lock on it.
*
- * There's a race condition here: the rel may have gone away since
- * the last time we saw it. If so, we don't need to vacuum it.
+ * There's a race condition here: the rel may have gone away since the
+ * last time we saw it. If so, we don't need to vacuum it.
*/
onerel = try_relation_open(relid, lmode);
{
relation_close(onerel, lmode);
CommitTransactionCommand();
- return; /* assume no long-lived data in temp tables */
+ return; /* assume no long-lived data in temp tables */
}
/*
i;
VRelStats *vacrelstats;
TransactionId FreezeLimit,
- OldestXmin;
+ OldestXmin;
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
vacrelstats->hasindex = false;
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
- * earliest transaction that could insert new tuples in the table after our
- * VACUUM is done.
+ * earliest transaction that could insert new tuples in the table after
+ * our VACUUM is done.
*/
vacrelstats->minxid = RecentXmin;
}
else
{
- TransactionId min;
+ TransactionId min;
num_tuples += 1;
notup = false;
if (tuple.t_len > max_tlen)
max_tlen = tuple.t_len;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
TransactionId
vactuple_get_minxid(HeapTuple tuple)
{
- TransactionId min = InvalidTransactionId;
+ TransactionId min = InvalidTransactionId;
- /*
- * Initialize calculations with Xmin. NB -- may be FrozenXid and
- * we don't want that one.
+ /*
+ * Initialize calculations with Xmin. NB -- may be FrozenXid and we don't
+ * want that one.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple->t_data)))
min = HeapTupleHeaderGetXmin(tuple->t_data);
/*
* If Xmax is not marked INVALID, we assume it's valid without making
- * further checks on it --- it must be recently obsoleted or still running,
- * else HeapTupleSatisfiesVacuum would have deemed it removable.
+ * further checks on it --- it must be recently obsoleted or still
+ * running, else HeapTupleSatisfiesVacuum would have deemed it removable.
*/
 if (!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID))
{
- TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
+ TransactionId xmax = HeapTupleHeaderGetXmax(tuple->t_data);
/* If xmax is a plain Xid, consider it by itself */
 if (!(tuple->t_data->t_infomask & HEAP_XMAX_IS_MULTI))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.79 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.80 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int num_free_pages; /* current # of entries */
int max_free_pages; /* # slots allocated in array */
PageFreeSpaceInfo *free_pages; /* array or heap of blkno/avail */
- BlockNumber tot_free_pages; /* total pages with >= threshold space */
+ BlockNumber tot_free_pages; /* total pages with >= threshold space */
} LVRelStats;
TransactionId OldestXmin);
static void lazy_vacuum_heap(Relation onerel, LVRelStats *vacrelstats);
static void lazy_vacuum_index(Relation indrel,
- IndexBulkDeleteResult **stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult **stats,
+ LVRelStats *vacrelstats);
static void lazy_cleanup_index(Relation indrel,
- IndexBulkDeleteResult *stats,
- LVRelStats *vacrelstats);
+ IndexBulkDeleteResult *stats,
+ LVRelStats *vacrelstats);
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats,
- TransactionId OldestXmin);
+ TransactionId OldestXmin);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats, TransactionId OldestXmin);
static void lazy_space_alloc(LVRelStats *vacrelstats, BlockNumber relblocks);
int nindexes;
BlockNumber possibly_freeable;
TransactionId OldestXmin,
- FreezeLimit;
+ FreezeLimit;
if (vacstmt->verbose)
elevel = INFO;
vacrelstats->threshold = GetAvgFSMRequestSize(&onerel->rd_node);
/*
- * Set initial minimum Xid, which will be updated if a smaller Xid is found
- * in the relation by lazy_scan_heap.
+ * Set initial minimum Xid, which will be updated if a smaller Xid is
+ * found in the relation by lazy_scan_heap.
*
* We use RecentXmin here (the minimum Xid that belongs to a transaction
* that is still open according to our snapshot), because it is the
num_tuples += 1;
hastup = true;
- /*
+ /*
* If the tuple is alive, we consider it for the "minxid"
* calculations.
*/
* If we remembered any tuples for deletion, then the page will be
* visited again by lazy_vacuum_heap, which will compute and record
* its post-compaction free space. If not, then we're done with this
- * page, so remember its free space as-is. (This path will always
- * be taken if there are no indexes.)
+ * page, so remember its free space as-is. (This path will always be
+ * taken if there are no indexes.)
*/
if (vacrelstats->num_dead_tuples == prev_dead_count)
{
}
}
vacrelstats->num_free_pages = j;
+
/*
* If tot_free_pages was more than num_free_pages, we can't tell for sure
* what its correct value is now, because we don't know which of the
- * forgotten pages are getting truncated. Conservatively set it equal
- * to num_free_pages.
+ * forgotten pages are getting truncated. Conservatively set it equal to
+ * num_free_pages.
*/
vacrelstats->tot_free_pages = j;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.118 2006/07/14 14:52:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.119 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* limit on names, so we can tell whether we're being passed an initial
* role name or a saved/restored value. (NOTE: we rely on guc.c to have
* properly truncated any incoming value, but not to truncate already-stored
- * values. See GUC_IS_NAME processing.)
+ * values. See GUC_IS_NAME processing.)
*/
extern char *session_authorization_string; /* in guc.c */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.97 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.98 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
def->colname = pstrdup(tle->resname);
def->typename = makeTypeNameFromOid(exprType((Node *) tle->expr),
- exprTypmod((Node *) tle->expr));
+ exprTypmod((Node *) tle->expr));
def->inhcount = 0;
def->is_local = true;
def->is_not_null = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.279 2006/08/12 20:05:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.280 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ScanDirection direction,
DestReceiver *dest);
static void ExecSelect(TupleTableSlot *slot,
- DestReceiver *dest, EState *estate);
+ DestReceiver *dest, EState *estate);
static void ExecInsert(TupleTableSlot *slot, ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
static void ExecDelete(ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
static void ExecUpdate(TupleTableSlot *slot, ItemPointer tupleid,
- TupleTableSlot *planSlot,
- DestReceiver *dest, EState *estate);
-static void ExecProcessReturning(ProjectionInfo *projectReturning,
+ TupleTableSlot *planSlot,
+ DestReceiver *dest, EState *estate);
+static void ExecProcessReturning(ProjectionInfo *projectReturning,
TupleTableSlot *tupleSlot,
TupleTableSlot *planSlot,
DestReceiver *dest);
econtext = CreateExprContext(estate);
/*
- * Build a projection for each result rel. Note that any SubPlans
- * in the RETURNING lists get attached to the topmost plan node.
+ * Build a projection for each result rel. Note that any SubPlans in
+ * the RETURNING lists get attached to the topmost plan node.
*/
Assert(list_length(parseTree->returningLists) == estate->es_num_result_relations);
resultRelInfo = estate->es_result_relations;
foreach(l, parseTree->returningLists)
{
- List *rlist = (List *) lfirst(l);
- List *rliststate;
+ List *rlist = (List *) lfirst(l);
+ List *rliststate;
rliststate = (List *) ExecInitExpr((Expr *) rlist, planstate);
resultRelInfo->ri_projectReturning =
ExecBuildProjectionInfo(rliststate, econtext, slot);
resultRelInfo++;
}
+
/*
- * Because we already ran ExecInitNode() for the top plan node,
- * any subplans we just attached to it won't have been initialized;
- * so we have to do it here. (Ugly, but the alternatives seem worse.)
+ * Because we already ran ExecInitNode() for the top plan node, any
+ * subplans we just attached to it won't have been initialized; so we
+ * have to do it here. (Ugly, but the alternatives seem worse.)
*/
foreach(l, planstate->subPlan)
{
SubPlanState *sstate = (SubPlanState *) lfirst(l);
Assert(IsA(sstate, SubPlanState));
- if (sstate->planstate == NULL) /* already inited? */
+ if (sstate->planstate == NULL) /* already inited? */
ExecInitSubPlan(sstate, estate, eflags);
}
}
erm->rti,
&update_ctid,
update_xmax,
- estate->es_snapshot->curcid);
+ estate->es_snapshot->curcid);
if (!TupIsNull(newSlot))
{
slot = planSlot = newSlot;
}
/*
- * Create a new "clean" tuple with all junk attributes removed.
- * We don't need to do this for DELETE, however (there will
- * in fact be no non-junk attributes in a DELETE!)
+ * Create a new "clean" tuple with all junk attributes removed. We
+ * don't need to do this for DELETE, however (there will in fact
+ * be no non-junk attributes in a DELETE!)
*/
if (operation != CMD_DELETE)
slot = ExecFilterJunk(junkfilter, slot);
if (resultRelInfo->ri_projectReturning)
{
/*
- * We have to put the target tuple into a slot, which means
- * first we gotta fetch it. We can use the trigger tuple slot.
+ * We have to put the target tuple into a slot, which means first we
+ * gotta fetch it. We can use the trigger tuple slot.
*/
TupleTableSlot *slot = estate->es_trig_tuple_slot;
HeapTupleData deltuple;
* dest: where to send the output
*/
static void
-ExecProcessReturning(ProjectionInfo *projectReturning,
+ExecProcessReturning(ProjectionInfo *projectReturning,
TupleTableSlot *tupleSlot,
TupleTableSlot *planSlot,
DestReceiver *dest)
{
- ExprContext *econtext = projectReturning->pi_exprContext;
- TupleTableSlot *retSlot;
+ ExprContext *econtext = projectReturning->pi_exprContext;
+ TupleTableSlot *retSlot;
/*
* Reset per-tuple memory context to free any expression evaluation
* If tuple was inserted by our own transaction, we have to check
* cmin against curCid: cmin >= curCid means our command cannot
* see the tuple, so we should ignore it. Without this we are
- * open to the "Halloween problem" of indefinitely re-updating
- * the same tuple. (We need not check cmax because
- * HeapTupleSatisfiesDirty will consider a tuple deleted by
- * our transaction dead, regardless of cmax.) We just checked
- * that priorXmax == xmin, so we can test that variable instead
- * of doing HeapTupleHeaderGetXmin again.
+ * open to the "Halloween problem" of indefinitely re-updating the
+ * same tuple. (We need not check cmax because
+ * HeapTupleSatisfiesDirty will consider a tuple deleted by our
+ * transaction dead, regardless of cmax.) We just checked that
+ * priorXmax == xmin, so we can test that variable instead of
+ * doing HeapTupleHeaderGetXmin again.
*/
if (TransactionIdIsCurrentTransactionId(priorXmax) &&
HeapTupleHeaderGetCmin(tuple.t_data) >= curCid)
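
The comment above spells out the guard against the "Halloween problem": a tuple written by our own transaction whose cmin is greater than or equal to the current command id must be treated as invisible, otherwise the command could keep re-updating rows it just produced. The toy sketch below restates that rule with made-up types (ToyTuple, visible_to_current_command); it is not the backend's visibility code and omits every other visibility check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;
typedef uint32_t CommandId;

/* A toy tuple header carrying only the fields the rule needs. */
typedef struct ToyTuple
{
	Xid			xmin;			/* inserting transaction */
	CommandId	cmin;			/* inserting command within that xact */
} ToyTuple;

/*
 * A tuple written by the current transaction is visible to the current
 * command only if it was written by an earlier command (cmin < cur_cid);
 * otherwise the command could see and re-update its own output forever.
 */
static bool
visible_to_current_command(const ToyTuple *tup, Xid current_xid,
						   CommandId cur_cid)
{
	if (tup->xmin == current_xid && tup->cmin >= cur_cid)
		return false;			/* our own output: ignore it */
	return true;				/* (all other visibility checks omitted) */
}

int
main(void)
{
	ToyTuple	just_written = {.xmin = 42, .cmin = 7};

	printf("visible to command 7? %d\n",
		   visible_to_current_command(&just_written, 42, 7));	/* 0 */
	printf("visible to command 8? %d\n",
		   visible_to_current_command(&just_written, 42, 8));	/* 1 */
	return 0;
}
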
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("tablespace \"%s\" does not exist",
parseTree->intoTableSpaceName)));
- } else
+ }
+ else
{
tablespaceId = GetDefaultTablespace();
/* note InvalidOid is OK in this case */
FreeTupleDesc(tupdesc);
/*
- * Advance command counter so that the newly-created relation's
- * catalog tuples will be visible to heap_open.
+ * Advance command counter so that the newly-created relation's catalog
+ * tuples will be visible to heap_open.
*/
CommandCounterIncrement();
/*
* If necessary, create a TOAST table for the INTO relation. Note that
- * AlterTableCreateToastTable ends with CommandCounterIncrement(), so
- * that the TOAST table will be visible for insertion.
+ * AlterTableCreateToastTable ends with CommandCounterIncrement(), so that
+ * the TOAST table will be visible for insertion.
*/
AlterTableCreateToastTable(intoRelationId);
/*
* We can skip WAL-logging the insertions, unless PITR is in use.
*
- * Note that for a non-temp INTO table, this is safe only because we
- * know that the catalog changes above will have been WAL-logged, and
- * so RecordTransactionCommit will think it needs to WAL-log the
- * eventual transaction commit. Else the commit might be lost, even
- * though all the data is safely fsync'd ...
+ * Note that for a non-temp INTO table, this is safe only because we know
+ * that the catalog changes above will have been WAL-logged, and so
+ * RecordTransactionCommit will think it needs to WAL-log the eventual
+ * transaction commit. Else the commit might be lost, even though all the
+ * data is safely fsync'd ...
*/
estate->es_into_relation_use_wal = XLogArchivingActive();
estate->es_into_relation_descriptor = intoRelationDesc;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.58 2006/08/02 01:59:45 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execProcnode.c,v 1.59 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case T_ValuesScan:
result = (PlanState *) ExecInitValuesScan((ValuesScan *) node,
- estate, eflags);
+ estate, eflags);
break;
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.194 2006/09/28 20:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.195 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRowCompare(RowCompareExprState *rstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static void
ShutdownTupleDescRef(Datum arg)
{
- TupleDesc *cache_field = (TupleDesc *) DatumGetPointer(arg);
+ TupleDesc *cache_field = (TupleDesc *) DatumGetPointer(arg);
if (*cache_field)
ReleaseTupleDesc(*cache_field);
/* if first time through, initialize */
if (cstate->attrMap == NULL)
{
- MemoryContext old_cxt;
- int n;
+ MemoryContext old_cxt;
+ int n;
/* allocate state in long-lived memory context */
old_cxt = MemoryContextSwitchTo(econtext->ecxt_per_query_memory);
switch (rctype)
{
- /* EQ and NE cases aren't allowed here */
+ /* EQ and NE cases aren't allowed here */
case ROWCOMPARE_LT:
result = (cmpresult < 0);
break;
for (att = 1; att <= tupDesc->natts; att++)
{
/* ignore dropped columns */
- if (tupDesc->attrs[att-1]->attisdropped)
+ if (tupDesc->attrs[att - 1]->attisdropped)
continue;
if (heap_attisnull(&tmptup, att))
{
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
- return (Datum) 0; /* keep compiler quiet */
+ return (Datum) 0; /* keep compiler quiet */
}
}
}
i = 0;
forboth(l, rcexpr->opnos, l2, rcexpr->opclasses)
{
- Oid opno = lfirst_oid(l);
- Oid opclass = lfirst_oid(l2);
- int strategy;
- Oid subtype;
- bool recheck;
- Oid proc;
+ Oid opno = lfirst_oid(l);
+ Oid opclass = lfirst_oid(l2);
+ int strategy;
+ Oid subtype;
+ bool recheck;
+ Oid proc;
get_op_opclass_properties(opno, opclass,
&strategy, &subtype, &recheck);
proc = get_opclass_proc(opclass, subtype, BTORDER_PROC);
+
/*
* If we enforced permissions checks on index support
* functions, we'd need to make a check here. But the
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("could not identify a comparison function for type %s",
format_type_be(minmaxexpr->minmaxtype))));
+
/*
* If we enforced permissions checks on index support
- * functions, we'd need to make a check here. But the
- * index support machinery doesn't do that, and neither
- * does this code.
+ * functions, we'd need to make a check here. But the index
+ * support machinery doesn't do that, and neither does this
+ * code.
*/
fmgr_info(typentry->cmp_proc, &(mstate->cfunc));
state = (ExprState *) mstate;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.97 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.98 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Fetch the slot's minimal physical tuple.
*
* If the slot contains a virtual tuple, we convert it to minimal
- * physical form. The slot retains ownership of the physical tuple.
+ * physical form. The slot retains ownership of the physical tuple.
* Likewise, if it contains a regular tuple we convert to minimal form.
*
* As above, the result must be treated as read-only.
Assert(!slot->tts_isempty);
/*
- * If we have a regular physical tuple, and it's locally palloc'd,
- * we have nothing to do.
+ * If we have a regular physical tuple, and it's locally palloc'd, we have
+ * nothing to do.
*/
if (slot->tts_tuple && slot->tts_shouldFree && slot->tts_mintuple == NULL)
return slot->tts_tuple;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.139 2006/08/04 21:33:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.140 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
resultRelInfo->ri_IndexRelationInfo = indexInfoArray;
/*
- * For each index, open the index relation and save pg_index info.
- * We acquire RowExclusiveLock, signifying we will update the index.
+ * For each index, open the index relation and save pg_index info. We
+ * acquire RowExclusiveLock, signifying we will update the index.
*/
i = 0;
foreach(l, indexoidlist)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.106 2006/09/06 20:40:47 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.107 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
for (i = 0; i < nargs; i++)
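
The palloc above uses the classic C idiom of a struct whose last member is a one-element array: sizeof(ParamListInfoData) already covers the first element, so only nargs - 1 further elements are added. A self-contained sketch of the same idiom with invented names (ParamList, Param, make_param_list):

#include <stdio.h>
#include <stdlib.h>

typedef struct Param
{
	int			value;
	int			isnull;
} Param;

/* Last member is a one-element array; extra elements are over-allocated. */
typedef struct ParamList
{
	int			numParams;
	Param		params[1];		/* actually numParams entries */
} ParamList;

static ParamList *
make_param_list(int nargs)
{
	/* sizeof(ParamList) already covers params[0], so add nargs - 1 more */
	ParamList  *list = malloc(sizeof(ParamList) +
							  (nargs - 1) * sizeof(Param));

	if (list == NULL)
		return NULL;
	list->numParams = nargs;
	for (int i = 0; i < nargs; i++)
	{
		list->params[i].value = i;
		list->params[i].isnull = 0;
	}
	return list;
}

int
main(void)
{
	ParamList  *list = make_param_list(4);

	if (list == NULL)
		return 1;
	printf("param 3 = %d\n", list->params[3].value);
	free(list);
	return 0;
}

In C99 the same layout is usually written with a flexible array member (Param params[];), which expresses the over-allocation without indexing past a declared bound.
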
parse = (Query *) lfirst(list_tail(queryTreeList));
/*
- * Note: eventually replace this with QueryReturnsTuples? We'd need
- * a more general method of determining the output type, though.
+ * Note: eventually replace this with QueryReturnsTuples? We'd need a
+ * more general method of determining the output type, though.
*/
isSelect = (parse->commandType == CMD_SELECT && parse->into == NULL);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.145 2006/07/27 19:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.146 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AggStatePerGroup pergroupstate,
FunctionCallInfoData *fcinfo)
{
- int numArguments = peraggstate->numArguments;
+ int numArguments = peraggstate->numArguments;
MemoryContext oldContext;
- Datum newVal;
- int i;
+ Datum newVal;
+ int i;
if (peraggstate->transfn.fn_strict)
{
/*
- * For a strict transfn, nothing happens when there's a NULL input;
- * we just keep the prior transValue.
+ * For a strict transfn, nothing happens when there's a NULL input; we
+ * just keep the prior transValue.
*/
for (i = 1; i <= numArguments; i++)
{
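
The comment above states the rule for strict transition functions: any NULL input means the call is skipped and the prior transValue survives, and, per the later hunk about NULL initvals, the first non-NULL input can simply become the initial state when the types are compatible. A toy accumulator illustrating that flow; Datum and AggState here are simplified stand-ins, not the executor's structures.

#include <stdbool.h>
#include <stdio.h>

/* Toy nullable datum. */
typedef struct Datum
{
	bool		isnull;
	int			value;
} Datum;

/* Toy per-aggregate state. */
typedef struct AggState
{
	bool		initialized;	/* has a non-null input seeded the state? */
	Datum		trans;			/* current transition value */
} AggState;

/* A strict transition function: max(state, input). */
static int
max_transfn(int state, int input)
{
	return input > state ? input : state;
}

/*
 * Advance one aggregate for one input row.  For a strict transfn, a NULL
 * input means "do nothing": the prior transition value is kept.  With no
 * initial value, the first non-null input simply becomes the state.
 */
static void
advance_strict(AggState *agg, Datum input)
{
	if (input.isnull)
		return;					/* strict: keep prior transValue */
	if (!agg->initialized)
	{
		agg->trans = input;		/* first input seeds the transition value */
		agg->initialized = true;
		return;
	}
	agg->trans.value = max_transfn(agg->trans.value, input.value);
}

int
main(void)
{
	Datum		rows[] = {{true, 0}, {false, 3}, {true, 0}, {false, 9}, {false, 5}};
	AggState	agg = {false, {true, 0}};

	for (int i = 0; i < 5; i++)
		advance_strict(&agg, rows[i]);
	printf("max = %d\n", agg.trans.value);	/* 9 */
	return 0;
}
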
for (aggno = 0; aggno < aggstate->numaggs; aggno++)
{
- AggStatePerAgg peraggstate = &aggstate->peragg[aggno];
- AggStatePerGroup pergroupstate = &pergroup[aggno];
- AggrefExprState *aggrefstate = peraggstate->aggrefstate;
- Aggref *aggref = peraggstate->aggref;
+ AggStatePerAgg peraggstate = &aggstate->peragg[aggno];
+ AggStatePerGroup pergroupstate = &pergroup[aggno];
+ AggrefExprState *aggrefstate = peraggstate->aggrefstate;
+ Aggref *aggref = peraggstate->aggref;
FunctionCallInfoData fcinfo;
- int i;
- ListCell *arg;
- MemoryContext oldContext;
+ int i;
+ ListCell *arg;
+ MemoryContext oldContext;
/* Switch memory context just once for all args */
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
bool haveOldVal = false;
MemoryContext workcontext = aggstate->tmpcontext->ecxt_per_tuple_memory;
MemoryContext oldContext;
- Datum *newVal;
- bool *isNull;
+ Datum *newVal;
+ bool *isNull;
FunctionCallInfoData fcinfo;
tuplesort_performsort(peraggstate->sortstate);
find_unaggregated_cols(AggState *aggstate)
{
Agg *node = (Agg *) aggstate->ss.ps.plan;
- Bitmapset *colnos;
+ Bitmapset *colnos;
colnos = NULL;
(void) find_unaggregated_cols_walker((Node *) node->plan.targetlist,
tmpmem);
/*
- * Create a list of the tuple columns that actually need to be stored
- * in hashtable entries. The incoming tuples from the child plan node
- * will contain grouping columns, other columns referenced in our
- * targetlist and qual, columns used to compute the aggregate functions,
- * and perhaps just junk columns we don't use at all. Only columns of the
- * first two types need to be stored in the hashtable, and getting rid of
- * the others can make the table entries significantly smaller. To avoid
- * messing up Var numbering, we keep the same tuple descriptor for
- * hashtable entries as the incoming tuples have, but set unwanted columns
- * to NULL in the tuples that go into the table.
+ * Create a list of the tuple columns that actually need to be stored in
+ * hashtable entries. The incoming tuples from the child plan node will
+ * contain grouping columns, other columns referenced in our targetlist
+ * and qual, columns used to compute the aggregate functions, and perhaps
+ * just junk columns we don't use at all. Only columns of the first two
+ * types need to be stored in the hashtable, and getting rid of the others
+ * can make the table entries significantly smaller. To avoid messing up
+ * Var numbering, we keep the same tuple descriptor for hashtable entries
+ * as the incoming tuples have, but set unwanted columns to NULL in the
+ * tuples that go into the table.
*
* To eliminate duplicates, we build a bitmapset of the needed columns,
- * then convert it to an integer list (cheaper to scan at runtime).
- * The list is in decreasing order so that the first entry is the largest;
+ * then convert it to an integer list (cheaper to scan at runtime). The
+ * list is in decreasing order so that the first entry is the largest;
* lookup_hash_entry depends on this to use slot_getsomeattrs correctly.
*
* Note: at present, searching the tlist/qual is not really necessary
slot_getsomeattrs(inputslot, linitial_int(aggstate->hash_needed));
foreach(l, aggstate->hash_needed)
{
- int varNumber = lfirst_int(l) - 1;
+ int varNumber = lfirst_int(l) - 1;
hashslot->tts_values[varNumber] = inputslot->tts_values[varNumber];
hashslot->tts_isnull[varNumber] = inputslot->tts_isnull[varNumber];
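
The long comment above explains the hashed-aggregation trick: keep the incoming tuple descriptor but store NULL in every column the hash table does not need, and keep the needed-column list in decreasing order so its first entry says how many leading attributes must be deconstructed. A standalone sketch with plain arrays standing in for slots and bitmapsets; all column numbers and names are invented.

#include <stdbool.h>
#include <stdio.h>

#define NCOLS 6

/*
 * Copy only the columns listed in "needed" (1-based, sorted in decreasing
 * order) from the input row into the hash-table row; everything else is
 * stored as NULL so the entry stays small while keeping the same layout.
 */
static void
project_needed(const int *in_values, const bool *in_nulls,
			   int *out_values, bool *out_nulls,
			   const int *needed, int nneeded)
{
	for (int col = 0; col < NCOLS; col++)
	{
		out_values[col] = 0;
		out_nulls[col] = true;	/* unwanted columns are stored as NULL */
	}
	for (int i = 0; i < nneeded; i++)
	{
		int			col = needed[i] - 1;	/* list is 1-based */

		out_values[col] = in_values[col];
		out_nulls[col] = in_nulls[col];
	}
}

int
main(void)
{
	int			in_values[NCOLS] = {10, 20, 30, 40, 50, 60};
	bool		in_nulls[NCOLS] = {false, false, false, false, false, false};
	int			out_values[NCOLS];
	bool		out_nulls[NCOLS];
	int			needed[] = {5, 2};	/* decreasing: needed[0] is the largest */

	/* the first (largest) entry says how many leading attrs to deconstruct */
	printf("deconstruct the first %d attributes\n", needed[0]);

	project_needed(in_values, in_nulls, out_values, out_nulls, needed, 2);
	for (int col = 0; col < NCOLS; col++)
		printf("col %d: %s\n", col + 1, out_nulls[col] ? "NULL" : "kept");
	return 0;
}
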
/*
* Use the representative input tuple for any references to
- * non-aggregated input columns in the qual and tlist. (If we are
- * not grouping, and there are no input rows at all, we will come
- * here with an empty firstSlot ... but if not grouping, there can't
- * be any references to non-aggregated input columns, so no problem.)
+ * non-aggregated input columns in the qual and tlist. (If we are not
+ * grouping, and there are no input rows at all, we will come here
+ * with an empty firstSlot ... but if not grouping, there can't be any
+ * references to non-aggregated input columns, so no problem.)
*/
econtext->ecxt_scantuple = firstSlot;
/*
* initialize child nodes
*
- * If we are doing a hashed aggregation then the child plan does not
- * need to handle REWIND efficiently; see ExecReScanAgg.
+ * If we are doing a hashed aggregation then the child plan does not need
+ * to handle REWIND efficiently; see ExecReScanAgg.
*/
if (node->aggstrategy == AGG_HASHED)
eflags &= ~EXEC_FLAG_REWIND;
Aggref *aggref = (Aggref *) aggrefstate->xprstate.expr;
AggStatePerAgg peraggstate;
Oid inputTypes[FUNC_MAX_ARGS];
- int numArguments;
+ int numArguments;
HeapTuple aggTuple;
Form_pg_aggregate aggform;
Oid aggtranstype;
*finalfnexpr;
Datum textInitVal;
int i;
- ListCell *lc;
+ ListCell *lc;
/* Planner should have assigned aggregate to correct level */
Assert(aggref->agglevelsup == 0);
peraggstate->numArguments = numArguments;
/*
- * Get actual datatypes of the inputs. These could be different
- * from the agg's declared input types, when the agg accepts ANY,
- * ANYARRAY or ANYELEMENT.
+ * Get actual datatypes of the inputs. These could be different from
+ * the agg's declared input types, when the agg accepts ANY, ANYARRAY
+ * or ANYELEMENT.
*/
i = 0;
foreach(lc, aggref->args)
/*
* If the transfn is strict and the initval is NULL, make sure input
- * type and transtype are the same (or at least binary-compatible),
- * so that it's OK to use the first input value as the initial
+ * type and transtype are the same (or at least binary-compatible), so
+ * that it's OK to use the first input value as the initial
* transValue. This should have been checked at agg definition time,
* but just in case...
*/
Assert(node->aggstrategy != AGG_HASHED);
/*
- * We don't currently implement DISTINCT aggs for aggs having
- * more than one argument. This isn't required for anything
- * in the SQL spec, but really it ought to be implemented for
+ * We don't currently implement DISTINCT aggs for aggs having more
+ * than one argument. This isn't required for anything in the SQL
+ * spec, but really it ought to be implemented for
* feature-completeness. FIXME someday.
*/
if (numArguments != 1)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.70 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAppend.c,v 1.71 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If chgParam of subnode is not null then plan will be re-scanned by
- * first ExecProcNode. However, if caller is passing us an exprCtxt
- * then forcibly rescan all the subnodes now, so that we can pass
- * the exprCtxt down to the subnodes (needed for appendrel indexscan).
+ * first ExecProcNode. However, if caller is passing us an exprCtxt
+ * then forcibly rescan all the subnodes now, so that we can pass the
+ * exprCtxt down to the subnodes (needed for appendrel indexscan).
*/
if (subnode->chgParam == NULL || exprCtxt != NULL)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.13 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapHeapscan.c,v 1.14 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pgstat_count_heap_fetch(&scan->rs_pgstat_info);
/*
- * Set up the result slot to point to this tuple. Note that the
- * slot acquires a pin on the buffer.
+ * Set up the result slot to point to this tuple. Note that the slot
+ * acquires a pin on the buffer.
*/
ExecStoreTuple(&scan->rs_ctup,
slot,
false);
/*
- * If we are using lossy info, we have to recheck the qual
- * conditions at every tuple.
+ * If we are using lossy info, we have to recheck the qual conditions
+ * at every tuple.
*/
if (tbmres->ntuples < 0)
{
static void
bitgetpage(HeapScanDesc scan, TBMIterateResult *tbmres)
{
- BlockNumber page = tbmres->blockno;
+ BlockNumber page = tbmres->blockno;
Buffer buffer;
Snapshot snapshot;
Page dp;
snapshot = scan->rs_snapshot;
/*
- * We must hold share lock on the buffer content while examining
- * tuple visibility. Afterwards, however, the tuples we have found
- * to be visible are guaranteed good as long as we hold the buffer pin.
+ * We must hold share lock on the buffer content while examining tuple
+ * visibility. Afterwards, however, the tuples we have found to be
+ * visible are guaranteed good as long as we hold the buffer pin.
*/
LockBuffer(buffer, BUFFER_LOCK_SHARE);
maxoff = PageGetMaxOffsetNumber(dp);
/*
- * Determine how many entries we need to look at on this page. If
- * the bitmap is lossy then we need to look at each physical item
- * pointer; otherwise we just look through the offsets listed in
- * tbmres.
+ * Determine how many entries we need to look at on this page. If the
+ * bitmap is lossy then we need to look at each physical item pointer;
+ * otherwise we just look through the offsets listed in tbmres.
*/
if (tbmres->ntuples >= 0)
{
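
The two comments above describe the bitmap heap scan contract: an exact per-page result carries a list of tuple offsets, while a lossy one (ntuples < 0) forces a visit to every item on the page plus a per-tuple recheck of the quals. A self-contained sketch of that dispatch; PageResult and visit_offset are toy stand-ins, not executor APIs.

#include <stdio.h>

#define MAX_OFFSETS 16

/* Toy per-page bitmap result: ntuples < 0 means "lossy, whole page". */
typedef struct PageResult
{
	unsigned	blockno;
	int			ntuples;		/* -1 = lossy; otherwise # of offsets */
	int			offsets[MAX_OFFSETS];
} PageResult;

static void
visit_offset(unsigned blockno, int offset, int recheck)
{
	printf("block %u offset %d%s\n", blockno, offset,
		   recheck ? " (recheck quals)" : "");
}

/*
 * Visit one page of the bitmap.  Exact pages name their offsets; lossy
 * pages force a walk over every line pointer plus a per-tuple recheck.
 */
static void
visit_page(const PageResult *res, int page_max_offset)
{
	if (res->ntuples >= 0)
	{
		for (int i = 0; i < res->ntuples; i++)
			visit_offset(res->blockno, res->offsets[i], 0);
	}
	else
	{
		for (int off = 1; off <= page_max_offset; off++)
			visit_offset(res->blockno, off, 1);
	}
}

int
main(void)
{
	PageResult	exact = {7, 3, {2, 5, 11}};
	PageResult	lossy = {8, -1, {0}};

	visit_page(&exact, 12);
	visit_page(&lossy, 4);
	return 0;
}
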
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
/*
- * Assert caller didn't ask for an unsafe snapshot --- see comments
- * at head of file.
+ * Assert caller didn't ask for an unsafe snapshot --- see comments at
+ * head of file.
*/
Assert(IsMVCCSnapshot(estate->es_snapshot));
* initialize child nodes
*
* We do this last because the child nodes will open indexscans on our
- * relation's indexes, and we want to be sure we have acquired a lock
- * on the relation first.
+ * relation's indexes, and we want to be sure we have acquired a lock on
+ * the relation first.
*/
outerPlanState(scanstate) = ExecInitNode(outerPlan(node), estate, eflags);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.20 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeBitmapIndexscan.c,v 1.21 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If we have runtime keys and they've not already been set up, do it now.
* Array keys are also treated as runtime keys; note that if ExecReScan
- * returns with biss_RuntimeKeysReady still false, then there is an
- * empty array key so we should do nothing.
+ * returns with biss_RuntimeKeysReady still false, then there is an empty
+ * array key so we should do nothing.
*/
if (!node->biss_RuntimeKeysReady &&
(node->biss_NumRuntimeKeys != 0 || node->biss_NumArrayKeys != 0))
* If we are doing runtime key calculations (ie, the index keys depend on
* data from an outer scan), compute the new key values.
*
- * Array keys are also treated as runtime keys; note that if we
- * return with biss_RuntimeKeysReady still false, then there is an
- * empty array key so no index scan is needed.
+ * Array keys are also treated as runtime keys; note that if we return
+ * with biss_RuntimeKeysReady still false, then there is an empty array
+ * key so no index scan is needed.
*/
if (node->biss_NumRuntimeKeys != 0)
ExecIndexEvalRuntimeKeys(econtext,
/*
* We do not open or lock the base relation here. We assume that an
- * ancestor BitmapHeapScan node is holding AccessShareLock (or better)
- * on the heap relation throughout the execution of the plan tree.
+ * ancestor BitmapHeapScan node is holding AccessShareLock (or better) on
+ * the heap relation throughout the execution of the plan tree.
*/
indexstate->ss.ss_currentRelation = NULL;
*/
relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
indexstate->biss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
/*
* Initialize index-specific scan state
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.84 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.85 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* since we aren't going to be able to skip the join on the strength
* of an empty inner relation anyway.)
*
- * If we are rescanning the join, we make use of information gained
- * on the previous scan: don't bother to try the prefetch if the
- * previous scan found the outer relation nonempty. This is not
- * 100% reliable since with new parameters the outer relation might
- * yield different results, but it's a good heuristic.
+ * If we are rescanning the join, we make use of information gained on
+ * the previous scan: don't bother to try the prefetch if the previous
+ * scan found the outer relation nonempty. This is not 100% reliable
+ * since with new parameters the outer relation might yield different
+ * results, but it's a good heuristic.
*
* The only way to make the check is to try to fetch a tuple from the
* outer plan node. If we succeed, we have to stash it away for later
/*
* Reset OuterNotEmpty for scan. (It's OK if we fetched a tuple
- * above, because ExecHashJoinOuterGetTuple will immediately
- * set it again.)
+ * above, because ExecHashJoinOuterGetTuple will immediately set it
+ * again.)
*/
node->hj_OuterNotEmpty = false;
}
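
The comment above describes the outer-relation prefetch heuristic: before building the hash table, try to pull one outer tuple so an empty outer side can skip the build entirely, stash that tuple for the join proper, and on rescans skip the probe once the outer side is known nonempty. A toy model of that control flow; OuterPlan, fetch_outer and outer_is_provably_empty are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Toy "outer plan": yields values from an array, 0 meaning end of data. */
typedef struct OuterPlan
{
	const int  *rows;
	int			pos;
} OuterPlan;

static int
fetch_outer(OuterPlan *outer)
{
	return outer->rows[outer->pos++];	/* 0 terminator = no tuple */
}

typedef struct JoinState
{
	bool		outer_not_empty;	/* learned on a previous scan */
	int			stashed;			/* prefetched outer row, 0 if none */
} JoinState;

/*
 * Decide whether the expensive hash-table build can be skipped because the
 * outer side is empty.  The prefetched row is stashed so the join proper
 * can consume it first; once the outer side is known nonempty, later
 * rescans skip the probe, accepting that new parameters could in principle
 * make it empty again (the "good heuristic" noted above).
 */
static bool
outer_is_provably_empty(JoinState *js, OuterPlan *outer)
{
	if (js->outer_not_empty)
		return false;			/* a previous scan already saw a row */
	js->stashed = fetch_outer(outer);
	if (js->stashed == 0)
		return true;			/* empty: no need to build the hash table */
	js->outer_not_empty = true;
	return false;
}

int
main(void)
{
	const int	rows[] = {42, 7, 0};
	OuterPlan	outer = {rows, 0};
	JoinState	js = {false, 0};

	printf("skip build? %d (stashed %d)\n",
		   outer_is_provably_empty(&js, &outer), js.stashed);
	return 0;
}
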
MinimalTuple tuple;
/*
- * Since both the hash value and the MinimalTuple length word are
- * uint32, we can read them both in one BufFileRead() call without
- * any type cheating.
+ * Since both the hash value and the MinimalTuple length word are uint32,
+ * we can read them both in one BufFileRead() call without any type
+ * cheating.
*/
nread = BufFileRead(file, (void *) header, sizeof(header));
- if (nread == 0) /* end of file */
+ if (nread == 0) /* end of file */
{
ExecClearTuple(tupleSlot);
return NULL;
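
The comment above relies on the saved hash value and the MinimalTuple length word both being uint32, so one read of a two-element uint32 array recovers both. The sketch below reproduces the pattern against a stdio temporary file; nothing in it is the backend's BufFile API, and the record layout (hash word, length word, then the tuple bytes) is only an assumption made for the example.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	FILE	   *f = tmpfile();
	uint32_t	header[2];		/* [0] = hash value, [1] = tuple length */
	char		payload[] = "minimal tuple body";
	char		buf[64];

	if (f == NULL)
		return 1;

	/* write one batch-file style record: hash, length, then the bytes */
	header[0] = 0xdeadbeefu;
	header[1] = (uint32_t) sizeof(payload);
	fwrite(header, sizeof(header), 1, f);
	fwrite(payload, sizeof(payload), 1, f);
	rewind(f);

	/* both words come back in a single read, as in the comment above */
	if (fread(header, sizeof(header), 1, f) != 1)
		return 1;				/* a zero-item read would mean end of file */
	if (header[1] > sizeof(buf) || fread(buf, header[1], 1, f) != 1)
		return 1;

	printf("hash %08x, %u payload bytes: %s\n",
		   (unsigned) header[0], (unsigned) header[1], buf);
	fclose(f);
	return 0;
}
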
/*
* okay to reuse the hash table; needn't rescan inner, either.
*
- * What we do need to do is reset our state about the emptiness
- * of the outer relation, so that the new scan of the outer will
+ * What we do need to do is reset our state about the emptiness of
+ * the outer relation, so that the new scan of the outer will
* update it correctly if it turns out to be empty this time.
* (There's no harm in clearing it now because ExecHashJoin won't
* need the info. In the other cases, where the hash table
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.116 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.117 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool isNull;
/*
- * For each run-time key, extract the run-time expression and
- * evaluate it with respect to the current outer tuple. We then stick
- * the result into the proper scan key.
+ * For each run-time key, extract the run-time expression and evaluate
+ * it with respect to the current outer tuple. We then stick the
+ * result into the proper scan key.
*
* Note: the result of the eval could be a pass-by-ref value that's
* stored in the outer scan's tuple, not in
bool *elem_nulls;
/*
- * Compute and deconstruct the array expression.
- * (Notes in ExecIndexEvalRuntimeKeys() apply here too.)
+ * Compute and deconstruct the array expression. (Notes in
+ * ExecIndexEvalRuntimeKeys() apply here too.)
*/
arraydatum = ExecEvalExpr(array_expr,
econtext,
}
/*
- * Note: we expect the previous array data, if any, to be automatically
- * freed by resetting the per-tuple context; hence no pfree's here.
+ * Note: we expect the previous array data, if any, to be
+ * automatically freed by resetting the per-tuple context; hence no
+ * pfree's here.
*/
arrayKeys[j].elem_values = elem_values;
arrayKeys[j].elem_nulls = elem_nulls;
*/
relistarget = ExecRelationIsTargetRelation(estate, node->scan.scanrelid);
indexstate->iss_RelationDesc = index_open(node->indexid,
- relistarget ? NoLock : AccessShareLock);
+ relistarget ? NoLock : AccessShareLock);
/*
* Initialize index-specific scan state
&indexstate->iss_NumScanKeys,
&indexstate->iss_RuntimeKeys,
&indexstate->iss_NumRuntimeKeys,
- NULL, /* no ArrayKeys */
+ NULL, /* no ArrayKeys */
NULL);
/*
/*
* If there are any RowCompareExpr quals, we need extra ScanKey entries
* for them, and possibly extra runtime-key entries. Count up what's
- * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
+ * needed. (The subsidiary ScanKey arrays for the RowCompareExprs could
* be allocated as separate chunks, but we have to count anyway to make
* runtime_keys large enough, so might as well just do one palloc.)
*/
{
/* (indexkey, indexkey, ...) op (expression, expression, ...) */
RowCompareExpr *rc = (RowCompareExpr *) clause;
- ListCell *largs_cell = list_head(rc->largs);
- ListCell *rargs_cell = list_head(rc->rargs);
- ListCell *opnos_cell = list_head(rc->opnos);
+ ListCell *largs_cell = list_head(rc->largs);
+ ListCell *rargs_cell = list_head(rc->rargs);
+ ListCell *opnos_cell = list_head(rc->opnos);
ScanKey first_sub_key = &scan_keys[extra_scan_keys];
/* Scan RowCompare columns and generate subsidiary ScanKey items */
opclass = index->rd_indclass->values[varattno - 1];
get_op_opclass_properties(opno, opclass,
- &op_strategy, &op_subtype, &op_recheck);
+ &op_strategy, &op_subtype, &op_recheck);
if (op_strategy != rc->rctype)
elog(ERROR, "RowCompare index qualification contains wrong operator");
*/
ScanKeyEntryInitialize(this_sub_key,
flags,
- varattno, /* attribute number */
- op_strategy, /* op's strategy */
- op_subtype, /* strategy subtype */
- opfuncid, /* reg proc to use */
- scanvalue); /* constant */
+ varattno, /* attribute number */
+ op_strategy, /* op's strategy */
+ op_subtype, /* strategy subtype */
+ opfuncid, /* reg proc to use */
+ scanvalue); /* constant */
extra_scan_keys++;
}
scan_keys[extra_scan_keys - 1].sk_flags |= SK_ROW_END;
/*
- * We don't use ScanKeyEntryInitialize for the header because
- * it isn't going to contain a valid sk_func pointer.
+ * We don't use ScanKeyEntryInitialize for the header because it
+ * isn't going to contain a valid sk_func pointer.
*/
MemSet(this_scan_key, 0, sizeof(ScanKeyData));
this_scan_key->sk_flags = SK_ROW_HEADER;
* initialize the scan key's fields appropriately
*/
ScanKeyEntryInitialize(this_scan_key,
- 0, /* flags */
+ 0, /* flags */
varattno, /* attribute number to scan */
strategy, /* op's strategy */
subtype, /* strategy subtype */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.56 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMaterial.c,v 1.57 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Append returned tuple to tuplestore. NOTE: because the
- * tuplestore is certainly in EOF state, its read position will move
- * forward over the added tuple. This is what we want.
+ * Append returned tuple to tuplestore. NOTE: because the tuplestore
+ * is certainly in EOF state, its read position will move forward over
+ * the added tuple. This is what we want.
*/
if (tuplestorestate)
tuplestore_puttupleslot(tuplestorestate, outerslot);
/*
- * And return a copy of the tuple. (XXX couldn't we just return
- * the outerslot?)
+ * And return a copy of the tuple. (XXX couldn't we just return the
+ * outerslot?)
*/
return ExecCopySlot(slot, outerslot);
}
matstate->ss.ps.state = estate;
/*
- * We must have random access to the subplan output to do backward scan
- * or mark/restore. We also prefer to materialize the subplan output
- * if we might be called on to rewind and replay it many times.
- * However, if none of these cases apply, we can skip storing the data.
+ * We must have random access to the subplan output to do backward scan or
+ * mark/restore. We also prefer to materialize the subplan output if we
+ * might be called on to rewind and replay it many times. However, if none
+ * of these cases apply, we can skip storing the data.
*/
matstate->randomAccess = (eflags & (EXEC_FLAG_REWIND |
EXEC_FLAG_BACKWARD |
/*
* initialize child nodes
*
- * We shield the child node from the need to support REWIND, BACKWARD,
- * or MARK/RESTORE.
+ * We shield the child node from the need to support REWIND, BACKWARD, or
+ * MARK/RESTORE.
*/
eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
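
The two nodeMaterial hunks above compute randomAccess from the REWIND/BACKWARD/MARK bits of eflags and then strip those bits before initializing the child. A minimal self-contained sketch of that pattern, using hypothetical FLAG_* constants in place of the executor's EXEC_FLAG_* bits:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the executor's eflags bits. */
#define FLAG_REWIND    0x01
#define FLAG_BACKWARD  0x02
#define FLAG_MARK      0x04

/* Decide whether the node must buffer with random access, then strip the
 * capabilities the child no longer has to provide. */
static bool
need_random_access(int *eflags)
{
    bool random = (*eflags & (FLAG_REWIND | FLAG_BACKWARD | FLAG_MARK)) != 0;

    *eflags &= ~(FLAG_REWIND | FLAG_BACKWARD | FLAG_MARK);
    return random;
}

int
main(void)
{
    int  eflags = FLAG_BACKWARD;
    bool random = need_random_access(&eflags);

    printf("random=%d remaining flags=%d\n", (int) random, eflags);
    return 0;
}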
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.81 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.82 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Load up the new inner tuple's comparison values. If we
- * see that it contains a NULL and hence can't match any
- * outer tuple, we can skip the comparison and assume the
- * new tuple is greater than current outer.
+ * Load up the new inner tuple's comparison values. If we see
+ * that it contains a NULL and hence can't match any outer
+ * tuple, we can skip the comparison and assume the new tuple
+ * is greater than current outer.
*/
if (!MJEvalInnerValues(node, innerTupleSlot))
{
else
{
/*
- * current inner can't possibly match any outer;
- * better to advance the inner scan than the outer.
+ * current inner can't possibly match any outer; better to
+ * advance the inner scan than the outer.
*/
node->mj_JoinState = EXEC_MJ_SKIPINNER_ADVANCE;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.42 2006/03/05 15:58:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeNestloop.c,v 1.43 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* initialize child nodes
*
* Tell the inner child that cheap rescans would be good. (This is
- * unnecessary if we are doing nestloop with inner indexscan, because
- * the rescan will always be with a fresh parameter --- but since
- * nodeIndexscan doesn't actually care about REWIND, there's no point
- * in dealing with that refinement.)
+ * unnecessary if we are doing nestloop with inner indexscan, because the
+ * rescan will always be with a fresh parameter --- but since
+ * nodeIndexscan doesn't actually care about REWIND, there's no point in
+ * dealing with that refinement.)
*/
outerPlanState(nlstate) = ExecInitNode(outerPlan(node), estate, eflags);
innerPlanState(nlstate) = ExecInitNode(innerPlan(node), estate,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.60 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.61 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* open that relation and acquire appropriate lock on it.
*/
currentRelation = ExecOpenScanRelation(estate,
- ((SeqScan *) node->ps.plan)->scanrelid);
+ ((SeqScan *) node->ps.plan)->scanrelid);
currentScanDesc = heap_beginscan(currentRelation,
estate->es_snapshot,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.57 2006/06/27 16:53:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSort.c,v 1.58 2006/10/04 00:29:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
sortstate->ss.ps.state = estate;
/*
- * We must have random access to the sort output to do backward scan
- * or mark/restore. We also prefer to materialize the sort output
- * if we might be called on to rewind and replay it many times.
+ * We must have random access to the sort output to do backward scan or
+ * mark/restore. We also prefer to materialize the sort output if we
+ * might be called on to rewind and replay it many times.
*/
sortstate->randomAccess = (eflags & (EXEC_FLAG_REWIND |
EXEC_FLAG_BACKWARD |
/*
* initialize child nodes
*
- * We shield the child node from the need to support REWIND, BACKWARD,
- * or MARK/RESTORE.
+ * We shield the child node from the need to support REWIND, BACKWARD, or
+ * MARK/RESTORE.
*/
eflags &= ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
node->sort_Done = false;
tuplesort_end((Tuplesortstate *) node->tuplesortstate);
node->tuplesortstate = NULL;
+
/*
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.79 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.80 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* For ALL, ANY, and ROWCOMPARE sublinks, load up the Params
- * representing the columns of the sub-select, and then evaluate
- * the combining expression.
+ * representing the columns of the sub-select, and then evaluate the
+ * combining expression.
*/
col = 1;
foreach(plst, subplan->paramIds)
* NULL) results of the IN operation, then we have to store subplan output
* rows that are partly or wholly NULL. We store such rows in a separate
* hash table that we expect will be much smaller than the main table. (We
- * can use hashing to eliminate partly-null rows that are not distinct.
- * We keep them separate to minimize the cost of the inevitable full-table
+ * can use hashing to eliminate partly-null rows that are not distinct. We
+ * keep them separate to minimize the cost of the inevitable full-table
* searches; see findPartialMatch.)
*
* If it's not necessary to distinguish FALSE and UNKNOWN, then we don't
/*
* Start up the subplan (this is a very cut-down form of InitPlan())
*
- * The subplan will never need to do BACKWARD scan or MARK/RESTORE.
- * If it is a parameterless subplan (not initplan), we suggest that it
- * be prepared to handle REWIND efficiently; otherwise there is no need.
+ * The subplan will never need to do BACKWARD scan or MARK/RESTORE. If it
+ * is a parameterless subplan (not initplan), we suggest that it be
+ * prepared to handle REWIND efficiently; otherwise there is no need.
*/
eflags &= EXEC_FLAG_EXPLAIN_ONLY;
if (subplan->parParam == NIL && subplan->setParam == NIL)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.31 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubqueryscan.c,v 1.32 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* too soon during shutdown.
*/
ExecAssignScanType(&subquerystate->ss,
- CreateTupleDescCopy(ExecGetResultType(subquerystate->subplan)));
+ CreateTupleDescCopy(ExecGetResultType(subquerystate->subplan)));
/*
* Initialize result tuple type and projection info.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.50 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeTidscan.c,v 1.51 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ListCell *l;
/*
- * We initialize the array with enough slots for the case that all
- * quals are simple OpExprs. If there's any ScalarArrayOpExprs,
- * we may have to enlarge the array.
+ * We initialize the array with enough slots for the case that all quals
+ * are simple OpExprs. If there's any ScalarArrayOpExprs, we may have to
+ * enlarge the array.
*/
numAllocTids = list_length(evalList);
tidList = (ItemPointerData *)
if (is_opclause(expr))
{
- FuncExprState *fexstate = (FuncExprState *) exstate;
- Node *arg1;
- Node *arg2;
+ FuncExprState *fexstate = (FuncExprState *) exstate;
+ Node *arg1;
+ Node *arg2;
arg1 = get_leftop(expr);
arg2 = get_rightop(expr);
/*
* Sort the array of TIDs into order, and eliminate duplicates.
- * Eliminating duplicates is necessary since we want OR semantics
- * across the list. Sorting makes it easier to detect duplicates,
- * and as a bonus ensures that we will visit the heap in the most
- * efficient way.
+ * Eliminating duplicates is necessary since we want OR semantics across
+ * the list. Sorting makes it easier to detect duplicates, and as a bonus
+ * ensures that we will visit the heap in the most efficient way.
*/
if (numTids > 1)
{
- int lastTid;
- int i;
+ int lastTid;
+ int i;
qsort((void *) tidList, numTids, sizeof(ItemPointerData),
itemptr_comparator);
{
const ItemPointerData *ipa = (const ItemPointerData *) a;
const ItemPointerData *ipb = (const ItemPointerData *) b;
- BlockNumber ba = ItemPointerGetBlockNumber(ipa);
- BlockNumber bb = ItemPointerGetBlockNumber(ipb);
+ BlockNumber ba = ItemPointerGetBlockNumber(ipa);
+ BlockNumber bb = ItemPointerGetBlockNumber(ipb);
OffsetNumber oa = ItemPointerGetOffsetNumber(ipa);
OffsetNumber ob = ItemPointerGetOffsetNumber(ipb);
*
* nodeValuesscan.c
* Support routines for scanning Values lists
- * ("VALUES (...), (...), ..." in rangetable).
+ * ("VALUES (...), (...), ..." in rangetable).
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.2 2006/08/02 18:58:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeValuesscan.c,v 1.3 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ValuesNext(ValuesScanState *node)
{
TupleTableSlot *slot;
- EState *estate;
- ExprContext *econtext;
- ScanDirection direction;
- List *exprlist;
+ EState *estate;
+ ExprContext *econtext;
+ ScanDirection direction;
+ List *exprlist;
/*
* get information from the estate and scan state
}
/*
- * Always clear the result slot; this is appropriate if we are at the
- * end of the data, and if we're not, we still need it as the first step
- * of the store-virtual-tuple protocol. It seems wise to clear the slot
+ * Always clear the result slot; this is appropriate if we are at the end
+ * of the data, and if we're not, we still need it as the first step of
+ * the store-virtual-tuple protocol. It seems wise to clear the slot
* before we reset the context it might have pointers into.
*/
ExecClearTuple(slot);
ReScanExprContext(econtext);
/*
- * Build the expression eval state in the econtext's per-tuple
- * memory. This is a tad unusual, but we want to delete the eval
- * state again when we move to the next row, to avoid growth of
- * memory requirements over a long values list.
+ * Build the expression eval state in the econtext's per-tuple memory.
+ * This is a tad unusual, but we want to delete the eval state again
+ * when we move to the next row, to avoid growth of memory
+ * requirements over a long values list.
*/
oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
/*
- * Pass NULL, not my plan node, because we don't want anything
- * in this transient state linking into permanent state. The
- * only possibility is a SubPlan, and there shouldn't be any
- * (any subselects in the VALUES list should be InitPlans).
+ * Pass NULL, not my plan node, because we don't want anything in this
+ * transient state linking into permanent state. The only possibility
+ * is a SubPlan, and there shouldn't be any (any subselects in the
+ * VALUES list should be InitPlans).
*/
exprstatelist = (List *) ExecInitExpr((Expr *) exprlist, NULL);
Assert(list_length(exprstatelist) == slot->tts_tupleDescriptor->natts);
/*
- * Compute the expressions and build a virtual result tuple.
- * We already did ExecClearTuple(slot).
+ * Compute the expressions and build a virtual result tuple. We
+ * already did ExecClearTuple(slot).
*/
values = slot->tts_values;
isnull = slot->tts_isnull;
resind = 0;
foreach(lc, exprstatelist)
{
- ExprState *estate = (ExprState *) lfirst(lc);
+ ExprState *estate = (ExprState *) lfirst(lc);
values[resind] = ExecEvalExpr(estate,
econtext,
ValuesScanState *
ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags)
{
- ValuesScanState *scanstate;
- RangeTblEntry *rte;
- TupleDesc tupdesc;
- ListCell *vtl;
- int i;
- PlanState *planstate;
+ ValuesScanState *scanstate;
+ RangeTblEntry *rte;
+ TupleDesc tupdesc;
+ ListCell *vtl;
+ int i;
+ PlanState *planstate;
/*
* ValuesScan should not have any children.
/*
* Create expression contexts. We need two, one for per-sublist
- * processing and one for execScan.c to use for quals and projections.
- * We cheat a little by using ExecAssignExprContext() to build both.
+ * processing and one for execScan.c to use for quals and projections. We
+ * cheat a little by using ExecAssignExprContext() to build both.
*/
ExecAssignExprContext(estate, planstate);
scanstate->rowcontext = planstate->ps_ExprContext;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.163 2006/09/07 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.164 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int k;
/*
- * Check that the plan is something the Portal code will special-case
- * as returning one tupleset.
+ * Check that the plan is something the Portal code will special-case as
+ * returning one tupleset.
*/
if (!SPI_is_cursor_plan(spiplan))
{
errmsg("cannot open empty query as cursor")));
ereport(ERROR,
(errcode(ERRCODE_INVALID_CURSOR_DEFINITION),
- /* translator: %s is name of a SQL command, eg INSERT */
+ /* translator: %s is name of a SQL command, eg INSERT */
errmsg("cannot open %s query as cursor",
CreateQueryTag(queryTree))));
}
{
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (spiplan->nargs - 1) * sizeof(ParamExternData));
+ (spiplan->nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = spiplan->nargs;
for (k = 0; k < spiplan->nargs; k++)
/* sizeof(ParamListInfoData) includes the first array element */
paramLI = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (nargs - 1) * sizeof(ParamExternData));
+ (nargs - 1) *sizeof(ParamExternData));
paramLI->numParams = nargs;
for (k = 0; k < nargs; k++)
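
Both palloc calls above use the usual variable-length-struct idiom: ParamListInfoData declares a one-element array as its last member, so the allocation only needs room for nargs - 1 further elements. A hypothetical, self-contained mirror of the idiom (param_list and alloc_param_list are made-up names; it assumes nargs >= 1, as the surrounding SPI code only allocates when there are arguments):

#include <stdio.h>
#include <stdlib.h>

typedef struct
{
    int numParams;
    int params[1];              /* actually numParams entries in place */
} param_list;

static param_list *
alloc_param_list(int nargs)
{
    /* sizeof(param_list) already includes the first array element */
    param_list *p = malloc(sizeof(param_list) + (nargs - 1) * sizeof(int));

    p->numParams = nargs;
    return p;
}

int
main(void)
{
    param_list *p = alloc_param_list(3);
    int         k;

    for (k = 0; k < p->numParams; k++)
        p->params[k] = k * 10;
    for (k = 0; k < p->numParams; k++)
        printf("param %d = %d\n", k, p->params[k]);
    free(p);
    return 0;
}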
ActiveSnapshot = NULL;
/*
- * The last canSetTag query sets the status values returned
- * to the caller. Be careful to free any tuptables not
- * returned, to avoid intratransaction memory leak.
+ * The last canSetTag query sets the status values returned to
+ * the caller. Be careful to free any tuptables not returned,
+ * to avoid intratransaction memory leak.
*/
if (queryTree->canSetTag)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.143 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.144 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include <winldap.h>
/* Correct header from the Platform SDK */
-typedef ULONG (*__ldap_start_tls_sA)(
- IN PLDAP ExternalHandle,
- OUT PULONG ServerReturnValue,
- OUT LDAPMessage **result,
- IN PLDAPControlA *ServerControls,
- IN PLDAPControlA *ClientControls
+typedef
+ULONG(*__ldap_start_tls_sA) (
+ IN PLDAP ExternalHandle,
+ OUT PULONG ServerReturnValue,
+ OUT LDAPMessage ** result,
+ IN PLDAPControlA * ServerControls,
+ IN PLDAPControlA * ClientControls
);
#endif
-static int CheckLDAPAuth(Port *port);
+static int CheckLDAPAuth(Port *port);
#endif
break;
#endif /* USE_PAM */
#ifdef USE_LDAP
- case uaLDAP:
- errstr = gettext_noop("LDAP authentication failed for user \"%s\"");
- break;
+ case uaLDAP:
+ errstr = gettext_noop("LDAP authentication failed for user \"%s\"");
+ break;
#endif /* USE_LDAP */
default:
errstr = gettext_noop("authentication failed for user \"%s\": invalid authentication method");
#endif /* USE_PAM */
#ifdef USE_LDAP
- case uaLDAP:
- status = CheckLDAPAuth(port);
- break;
+ case uaLDAP:
+ status = CheckLDAPAuth(port);
+ break;
#endif
case uaTrust:
static int
CheckLDAPAuth(Port *port)
{
- char *passwd;
- char server[128];
- char basedn[128];
- char prefix[128];
- char suffix[128];
- LDAP *ldap;
- int ssl = 0;
- int r;
- int ldapversion = LDAP_VERSION3;
- int ldapport = LDAP_PORT;
- char fulluser[128];
-
- if (!port->auth_arg || port->auth_arg[0] == '\0')
- {
- ereport(LOG,
- (errmsg("LDAP configuration URL not specified")));
- return STATUS_ERROR;
- }
-
- /*
- * Crack the LDAP url. We do a very trivial parse..
- * ldap[s]://<server>[:<port>]/<basedn>[;prefix[;suffix]]
- */
-
- server[0] = '\0';
- basedn[0] = '\0';
- prefix[0] = '\0';
- suffix[0] = '\0';
-
- /* ldap, including port number */
- r = sscanf(port->auth_arg,
- "ldap://%127[^:]:%i/%127[^;];%127[^;];%127s",
- server, &ldapport, basedn, prefix, suffix);
- if (r < 3)
- {
- /* ldaps, including port number */
- r = sscanf(port->auth_arg,
- "ldaps://%127[^:]:%i/%127[^;];%127[^;];%127s",
- server, &ldapport, basedn, prefix, suffix);
- if (r >=3) ssl = 1;
- }
- if (r < 3)
- {
- /* ldap, no port number */
- r = sscanf(port->auth_arg,
- "ldap://%127[^/]/%127[^;];%127[^;];%127s",
- server, basedn, prefix, suffix);
- }
- if (r < 2)
- {
- /* ldaps, no port number */
- r = sscanf(port->auth_arg,
- "ldaps://%127[^/]/%127[^;];%127[^;];%127s",
- server, basedn, prefix, suffix);
- if (r >= 2) ssl = 1;
- }
- if (r < 2)
- {
- ereport(LOG,
- (errmsg("invalid LDAP URL: \"%s\"",
+ char *passwd;
+ char server[128];
+ char basedn[128];
+ char prefix[128];
+ char suffix[128];
+ LDAP *ldap;
+ int ssl = 0;
+ int r;
+ int ldapversion = LDAP_VERSION3;
+ int ldapport = LDAP_PORT;
+ char fulluser[128];
+
+ if (!port->auth_arg || port->auth_arg[0] == '\0')
+ {
+ ereport(LOG,
+ (errmsg("LDAP configuration URL not specified")));
+ return STATUS_ERROR;
+ }
+
+ /*
+ * Crack the LDAP url. We do a very trivial parse..
+ * ldap[s]://<server>[:<port>]/<basedn>[;prefix[;suffix]]
+ */
+
+ server[0] = '\0';
+ basedn[0] = '\0';
+ prefix[0] = '\0';
+ suffix[0] = '\0';
+
+ /* ldap, including port number */
+ r = sscanf(port->auth_arg,
+ "ldap://%127[^:]:%i/%127[^;];%127[^;];%127s",
+ server, &ldapport, basedn, prefix, suffix);
+ if (r < 3)
+ {
+ /* ldaps, including port number */
+ r = sscanf(port->auth_arg,
+ "ldaps://%127[^:]:%i/%127[^;];%127[^;];%127s",
+ server, &ldapport, basedn, prefix, suffix);
+ if (r >= 3)
+ ssl = 1;
+ }
+ if (r < 3)
+ {
+ /* ldap, no port number */
+ r = sscanf(port->auth_arg,
+ "ldap://%127[^/]/%127[^;];%127[^;];%127s",
+ server, basedn, prefix, suffix);
+ }
+ if (r < 2)
+ {
+ /* ldaps, no port number */
+ r = sscanf(port->auth_arg,
+ "ldaps://%127[^/]/%127[^;];%127[^;];%127s",
+ server, basedn, prefix, suffix);
+ if (r >= 2)
+ ssl = 1;
+ }
+ if (r < 2)
+ {
+ ereport(LOG,
+ (errmsg("invalid LDAP URL: \"%s\"",
port->auth_arg)));
- return STATUS_ERROR;
- }
-
- sendAuthRequest(port, AUTH_REQ_PASSWORD);
-
- passwd = recv_password_packet(port);
- if (passwd == NULL)
- return STATUS_EOF; /* client wouldn't send password */
-
- ldap = ldap_init(server, ldapport);
- if (!ldap)
- {
+ return STATUS_ERROR;
+ }
+
+ sendAuthRequest(port, AUTH_REQ_PASSWORD);
+
+ passwd = recv_password_packet(port);
+ if (passwd == NULL)
+ return STATUS_EOF; /* client wouldn't send password */
+
+ ldap = ldap_init(server, ldapport);
+ if (!ldap)
+ {
#ifndef WIN32
- ereport(LOG,
- (errmsg("could not initialize LDAP: error code %d",
- errno)));
+ ereport(LOG,
+ (errmsg("could not initialize LDAP: error code %d",
+ errno)));
#else
- ereport(LOG,
- (errmsg("could not initialize LDAP: error code %d",
- (int) LdapGetLastError())));
+ ereport(LOG,
+ (errmsg("could not initialize LDAP: error code %d",
+ (int) LdapGetLastError())));
#endif
- return STATUS_ERROR;
- }
-
- if ((r = ldap_set_option(ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS)
- {
- ereport(LOG,
- (errmsg("could not set LDAP protocol version: error code %d", r)));
- return STATUS_ERROR;
- }
-
- if (ssl)
- {
+ return STATUS_ERROR;
+ }
+
+ if ((r = ldap_set_option(ldap, LDAP_OPT_PROTOCOL_VERSION, &ldapversion)) != LDAP_SUCCESS)
+ {
+ ereport(LOG,
+ (errmsg("could not set LDAP protocol version: error code %d", r)));
+ return STATUS_ERROR;
+ }
+
+ if (ssl)
+ {
#ifndef WIN32
if ((r = ldap_start_tls_s(ldap, NULL, NULL)) != LDAP_SUCCESS)
#else
* exist on Windows 2000, and causes a load error for the whole
* exe if referenced.
*/
- HANDLE ldaphandle;
-
+ HANDLE ldaphandle;
+
ldaphandle = LoadLibrary("WLDAP32.DLL");
if (ldaphandle == NULL)
{
- /* should never happen since we import other files from wldap32, but check anyway */
+ /*
+ * should never happen since we import other files from
+ * wldap32, but check anyway
+ */
ereport(LOG,
(errmsg("could not load wldap32.dll")));
return STATUS_ERROR;
}
- _ldap_start_tls_sA = (__ldap_start_tls_sA)GetProcAddress(ldaphandle, "ldap_start_tls_sA");
+ _ldap_start_tls_sA = (__ldap_start_tls_sA) GetProcAddress(ldaphandle, "ldap_start_tls_sA");
if (_ldap_start_tls_sA == NULL)
{
ereport(LOG,
* process and is automatically cleaned up on process exit.
*/
}
- if ((r = _ldap_start_tls_sA(ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
+ if ((r = _ldap_start_tls_sA(ldap, NULL, NULL, NULL, NULL)) != LDAP_SUCCESS)
#endif
- {
- ereport(LOG,
- (errmsg("could not start LDAP TLS session: error code %d", r)));
- return STATUS_ERROR;
- }
- }
-
- snprintf(fulluser, sizeof(fulluser)-1, "%s%s%s",
+ {
+ ereport(LOG,
+ (errmsg("could not start LDAP TLS session: error code %d", r)));
+ return STATUS_ERROR;
+ }
+ }
+
+ snprintf(fulluser, sizeof(fulluser) - 1, "%s%s%s",
prefix, port->user_name, suffix);
- fulluser[sizeof(fulluser)-1] = '\0';
+ fulluser[sizeof(fulluser) - 1] = '\0';
- r = ldap_simple_bind_s(ldap, fulluser, passwd);
- ldap_unbind(ldap);
+ r = ldap_simple_bind_s(ldap, fulluser, passwd);
+ ldap_unbind(ldap);
- if (r != LDAP_SUCCESS)
- {
- ereport(LOG,
- (errmsg("LDAP login failed for user \"%s\" on server \"%s\": error code %d",
+ if (r != LDAP_SUCCESS)
+ {
+ ereport(LOG,
+ (errmsg("LDAP login failed for user \"%s\" on server \"%s\": error code %d",
fulluser, server, r)));
- return STATUS_ERROR;
- }
-
- return STATUS_OK;
-}
+ return STATUS_ERROR;
+ }
+ return STATUS_OK;
+}
#endif /* USE_LDAP */
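
CheckLDAPAuth() above cracks the URL with a cascade of sscanf() attempts: ldap:// with a port, ldaps:// with a port, then the port-less forms of each. The following stand-alone demo reproduces just that parsing order on a hard-coded example URL; the URL and the output format are illustrative only, not part of the server code:

#include <stdio.h>

int
main(void)
{
    const char *url = "ldaps://ldap.example.com:636/dc=example,dc=com;uid=;,ou=people";
    char        server[128] = "";
    char        basedn[128] = "";
    char        prefix[128] = "";
    char        suffix[128] = "";
    int         ldapport = 389;
    int         ssl = 0;
    int         r;

    /* ldap, including port number */
    r = sscanf(url, "ldap://%127[^:]:%i/%127[^;];%127[^;];%127s",
               server, &ldapport, basedn, prefix, suffix);
    if (r < 3)
    {
        /* ldaps, including port number */
        r = sscanf(url, "ldaps://%127[^:]:%i/%127[^;];%127[^;];%127s",
                   server, &ldapport, basedn, prefix, suffix);
        if (r >= 3)
            ssl = 1;
    }
    if (r < 3)
    {
        /* ldap, no port number */
        r = sscanf(url, "ldap://%127[^/]/%127[^;];%127[^;];%127s",
                   server, basedn, prefix, suffix);
    }
    if (r < 2)
    {
        /* ldaps, no port number */
        r = sscanf(url, "ldaps://%127[^/]/%127[^;];%127[^;];%127s",
                   server, basedn, prefix, suffix);
        if (r >= 2)
            ssl = 1;
    }
    if (r < 2)
    {
        printf("invalid LDAP URL\n");
        return 1;
    }
    printf("ssl=%d server=%s port=%d basedn=%s prefix=%s suffix=%s\n",
           ssl, server, ldapport, basedn, prefix, suffix);
    return 0;
}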
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.72 2006/09/04 14:57:27 petere Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.73 2006/10/04 00:29:53 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
static DH *tmp_dh_cb(SSL *s, int is_export, int keylength);
static int verify_cb(int, X509_STORE_CTX *);
static void info_cb(const SSL *ssl, int type, int args);
-static void initialize_SSL(void);
+static void initialize_SSL(void);
static void destroy_SSL(void);
static int open_server_SSL(Port *);
static void close_SSL(Port *);
else
{
/*
- * Check the Certificate Revocation List (CRL) if file exists.
- * http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci803160,00.html
+ * Check the Certificate Revocation List (CRL) if file exists.
+ * http://searchsecurity.techtarget.com/sDefinition/0,,sid14_gci803160,
+ * 00.html
*/
X509_STORE *cvstore = SSL_CTX_get_cert_store(SSL_context);
/* OpenSSL 0.96 does not support X509_V_FLAG_CRL_CHECK */
#ifdef X509_V_FLAG_CRL_CHECK
X509_STORE_set_flags(cvstore,
- X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
+ X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
#else
ereport(LOG,
- (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" ignored",
- ROOT_CRL_FILE),
- errdetail("Installed SSL library does not support CRL.")));
+ (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" ignored",
+ ROOT_CRL_FILE),
+ errdetail("Installed SSL library does not support CRL.")));
#endif
else
{
/* Not fatal - we do not require CRL */
ereport(LOG,
- (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" not found, skipping: %s",
- ROOT_CRL_FILE, SSLerrmessage()),
+ (errmsg("SSL Certificate Revocation List (CRL) file \"%s\" not found, skipping: %s",
+ ROOT_CRL_FILE, SSLerrmessage()),
errdetail("Will not check certificates against CRL.")));
}
}
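
The be-secure.c hunk above enables CRL checking on the SSL context's certificate store when a CRL file is available. A minimal OpenSSL sketch of the same steps, assuming an already-initialized SSL_CTX and an illustrative CRL filename; error handling is simplified relative to the server's ereport() calls:

#include <openssl/ssl.h>
#include <openssl/x509_vfy.h>

/* Load a CRL file into the context's store and turn on CRL checking.
 * Returns 1 on success, 0 otherwise. */
int
enable_crl_checking(SSL_CTX *ctx, const char *crl_file)
{
    X509_STORE *store = SSL_CTX_get_cert_store(ctx);

    if (store == NULL)
        return 0;
    if (X509_STORE_load_locations(store, crl_file, NULL) != 1)
        return 0;               /* CRL file missing or unreadable */
#ifdef X509_V_FLAG_CRL_CHECK
    X509_STORE_set_flags(store,
                         X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
    return 1;
#else
    return 0;                   /* library too old to support CRLs */
#endif
}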
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.155 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.156 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*userauth_p = uaPAM;
#endif
#ifdef USE_LDAP
- else if (strcmp(token,"ldap") == 0)
- *userauth_p = uaLDAP;
+ else if (strcmp(token, "ldap") == 0)
+ *userauth_p = uaLDAP;
#endif
else
{
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.187 2006/08/11 20:44:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.188 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
#ifdef WIN32
+
/*
- * This is a Win32 socket optimization. The ideal size is 32k.
- * http://support.microsoft.com/kb/823764/EN-US/
+ * This is a Win32 socket optimization. The ideal size is 32k.
+ * http://support.microsoft.com/kb/823764/EN-US/
*/
on = PQ_BUFFER_SIZE * 4;
if (setsockopt(port->sock, SOL_SOCKET, SO_SNDBUF, (char *) &on,
- sizeof(on)) < 0)
+ sizeof(on)) < 0)
{
elog(LOG, "setsockopt(SO_SNDBUF) failed: %m");
return STATUS_ERROR;
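
The pqcomm.c hunk above applies the Win32 send-buffer tweak of PQ_BUFFER_SIZE * 4, i.e. 32kB. A POSIX-flavoured sketch of the same setsockopt() call; the 8192 value is an assumption standing in for PQ_BUFFER_SIZE, and on Windows the call would take a SOCKET and the winsock headers instead:

#include <sys/socket.h>

/* Bump the socket's send buffer to 32kB; returns setsockopt()'s result. */
int
set_send_buffer(int sock)
{
    int on = 8192 * 4;

    return setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
                      (char *) &on, sizeof(on));
}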
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.104 2006/07/14 14:52:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.105 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
set_pglocale_pgservice(argv[0], "postgres");
#ifdef WIN32
+
/*
* Windows uses codepages rather than the environment, so we work around
* that by querying the environment explicitly first for LC_COLLATE and
check_root(progname);
/*
- * Dispatch to one of various subprograms depending on first
- * argument.
+ * Dispatch to one of various subprograms depending on first argument.
*/
#ifdef EXEC_BACKEND
#endif
#ifdef WIN32
+
/*
* Start our win32 signal implementation
*
/*
- * Place platform-specific startup hacks here. This is the right
+ * Place platform-specific startup hacks here. This is the right
* place to put code that must be executed early in launch of either a
* postmaster, a standalone backend, or a standalone bootstrap run.
* Note that this code will NOT be executed when a backend or
/*
- * On some platforms, unaligned memory accesses result in a kernel
- * trap; the default kernel behavior is to emulate the memory
- * access, but this results in a significant performance penalty.
- * We ought to fix PG not to make such unaligned memory accesses,
- * so this code disables the kernel emulation: unaligned accesses
- * will result in SIGBUS instead.
+ * On some platforms, unaligned memory accesses result in a kernel trap;
+ * the default kernel behavior is to emulate the memory access, but this
+ * results in a significant performance penalty. We ought to fix PG not to
+ * make such unaligned memory accesses, so this code disables the kernel
+ * emulation: unaligned accesses will result in SIGBUS instead.
*/
#ifdef NOFIXADE
write_stderr("%s: setsysinfo failed: %s\n",
progname, strerror(errno));
#endif
-
-#endif /* NOFIXADE */
+#endif /* NOFIXADE */
#ifdef WIN32
}
/* In case of general protection fault, don't show GUI popup box */
- SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
printf(_(" -x NUM internal use\n"));
printf(_("\nPlease read the documentation for the complete list of run-time\n"
- "configuration settings and how to set them on the command line or in\n"
+ "configuration settings and how to set them on the command line or in\n"
"the configuration file.\n\n"
}
{
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
- "possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "possible system security compromise. See the documentation for\n"
+ "more information on how to properly start the server.\n");
exit(1);
}
/*
- * Also make sure that real and effective uids are the same.
- * Executing as a setuid program from a root shell is a security
- * hole, since on many platforms a nefarious subroutine could
- * setuid back to root if real uid is root. (Since nobody
- * actually uses postgres as a setuid program, trying to
- * actively fix this situation seems more trouble than it's worth;
- * we'll just expend the effort to check for it.)
+ * Also make sure that real and effective uids are the same. Executing as
+ * a setuid program from a root shell is a security hole, since on many
+ * platforms a nefarious subroutine could setuid back to root if real uid
+ * is root. (Since nobody actually uses postgres as a setuid program,
+ * trying to actively fix this situation seems more trouble than it's
+ * worth; we'll just expend the effort to check for it.)
*/
if (getuid() != geteuid())
{
progname);
exit(1);
}
-#else /* WIN32 */
+#else /* WIN32 */
if (pgwin32_is_admin())
{
write_stderr("Execution of PostgreSQL by a user with administrative permissions is not\n"
"permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
- "possible system security compromises. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "possible system security compromises. See the documentation for\n"
+ "more information on how to properly start the server.\n");
exit(1);
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.350 2006/08/30 23:34:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.351 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static RowCompareExpr *
_copyRowCompareExpr(RowCompareExpr *from)
{
- RowCompareExpr *newnode = makeNode(RowCompareExpr);
+ RowCompareExpr *newnode = makeNode(RowCompareExpr);
COPY_SCALAR_FIELD(rctype);
COPY_NODE_FIELD(opnos);
}
static DropOwnedStmt *
-_copyDropOwnedStmt(DropOwnedStmt * from)
+_copyDropOwnedStmt(DropOwnedStmt *from)
{
DropOwnedStmt *newnode = makeNode(DropOwnedStmt);
}
static ReassignOwnedStmt *
-_copyReassignOwnedStmt(ReassignOwnedStmt * from)
+_copyReassignOwnedStmt(ReassignOwnedStmt *from)
{
ReassignOwnedStmt *newnode = makeNode(ReassignOwnedStmt);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.284 2006/08/30 23:34:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.285 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
static bool
-_equalDropOwnedStmt(DropOwnedStmt * a, DropOwnedStmt * b)
+_equalDropOwnedStmt(DropOwnedStmt *a, DropOwnedStmt *b)
{
COMPARE_NODE_FIELD(roles);
COMPARE_SCALAR_FIELD(behavior);
}
static bool
-_equalReassignOwnedStmt(ReassignOwnedStmt * a, ReassignOwnedStmt * b)
+_equalReassignOwnedStmt(ReassignOwnedStmt *a, ReassignOwnedStmt *b)
{
COMPARE_NODE_FIELD(roles);
COMPARE_NODE_FIELD(newrole);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.51 2006/08/21 00:57:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/makefuncs.c,v 1.52 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
DefElem *
makeDefElem(char *name, Node *arg)
{
- DefElem *res = makeNode(DefElem);
+ DefElem *res = makeNode(DefElem);
res->defname = name;
res->arg = arg;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.6 2006/04/22 01:25:59 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.7 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* sizeof(ParamListInfoData) includes the first array element */
size = sizeof(ParamListInfoData) +
- (from->numParams - 1) * sizeof(ParamExternData);
+ (from->numParams - 1) *sizeof(ParamExternData);
retval = (ParamListInfo) palloc(size);
memcpy(retval, from, size);
/*
- * Flat-copy is not good enough for pass-by-ref data values, so make
- * a pass over the array to copy those.
+ * Flat-copy is not good enough for pass-by-ref data values, so make a
+ * pass over the array to copy those.
*/
for (i = 0; i < retval->numParams; i++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.153 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.154 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
- Index rti, RangeTblEntry *rte);
+ Index rti, RangeTblEntry *rte);
static void set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte);
static void set_function_pathlist(PlannerInfo *root, RelOptInfo *rel,
RangeTblEntry *rte);
static void set_values_pathlist(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static RelOptInfo *make_rel_from_joinlist(PlannerInfo *root, List *joinlist);
static RelOptInfo *make_one_rel_by_joins(PlannerInfo *root, int levels_needed,
List *initial_rels);
* set_append_rel_pathlist
* Build access paths for an "append relation"
*
- * The passed-in rel and RTE represent the entire append relation. The
+ * The passed-in rel and RTE represent the entire append relation. The
* relation's contents are computed by appending together the output of
* the individual member relations. Note that in the inheritance case,
* the first member relation is actually the same table as is mentioned in
/*
* XXX for now, can't handle inherited expansion of FOR UPDATE/SHARE; can
* we do better? (This will take some redesign because the executor
- * currently supposes that every rowMark relation is involved in every
- * row returned by the query.)
+ * currently supposes that every rowMark relation is involved in every row
+ * returned by the query.)
*/
if (get_rowmark(root->parse, parentRTindex))
ereport(ERROR,
childrel->max_attr);
/*
- * Compute the child's access paths, and add the cheapest one
- * to the Append path we are constructing for the parent.
+ * Compute the child's access paths, and add the cheapest one to the
+ * Append path we are constructing for the parent.
*
- * It's possible that the child is itself an appendrel, in which
- * case we can "cut out the middleman" and just add its child
- * paths to our own list. (We don't try to do this earlier because
- * we need to apply both levels of transformation to the quals.)
- * This test also handles the case where the child rel need not
- * be scanned because of constraint exclusion: it'll have an
- * Append path with no subpaths, and will vanish from our list.
+ * It's possible that the child is itself an appendrel, in which case
+ * we can "cut out the middleman" and just add its child paths to our
+ * own list. (We don't try to do this earlier because we need to
+ * apply both levels of transformation to the quals.) This test also
+ * handles the case where the child rel need not be scanned because of
+ * constraint exclusion: it'll have an Append path with no subpaths,
+ * and will vanish from our list.
*/
set_rel_pathlist(root, childrel, childRTindex);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.81 2006/07/14 14:52:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.82 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
rinfo = (RestrictInfo *) clause;
/*
- * If the clause is marked pseudoconstant, then it will be used as
- * a gating qual and should not affect selectivity estimates; hence
- * return 1.0. The only exception is that a constant FALSE may
- * be taken as having selectivity 0.0, since it will surely mean
- * no rows out of the plan. This case is simple enough that we
- * need not bother caching the result.
+ * If the clause is marked pseudoconstant, then it will be used as a
+ * gating qual and should not affect selectivity estimates; hence
+ * return 1.0. The only exception is that a constant FALSE may be
+ * taken as having selectivity 0.0, since it will surely mean no rows
+ * out of the plan. This case is simple enough that we need not
+ * bother caching the result.
*/
if (rinfo->pseudoconstant)
{
- if (! IsA(rinfo->clause, Const))
+ if (!IsA(rinfo->clause, Const))
return s1;
}
else if (IsA(clause, Const))
{
/* bool constant is pretty easy... */
- Const *con = (Const *) clause;
+ Const *con = (Const *) clause;
s1 = con->constisnull ? 0.0 :
DatumGetBool(con->constvalue) ? 1.0 : 0.0;
if (IsA(subst, Const))
{
/* bool constant is pretty easy... */
- Const *con = (Const *) subst;
+ Const *con = (Const *) subst;
s1 = con->constisnull ? 0.0 :
DatumGetBool(con->constvalue) ? 1.0 : 0.0;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.166 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.167 2006/10/04 00:29:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
double cpu_index_tuple_cost = DEFAULT_CPU_INDEX_TUPLE_COST;
double cpu_operator_cost = DEFAULT_CPU_OPERATOR_COST;
-int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
+int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
Cost disable_cost = 100000000.0;
if (outer_rel != NULL && outer_rel->rows > 1)
{
/*
- * For repeated indexscans, scale up the number of tuples fetched
- * in the Mackert and Lohman formula by the number of scans, so
- * that we estimate the number of pages fetched by all the scans.
- * Then pro-rate the costs for one scan. In this case we assume
- * all the fetches are random accesses. XXX it'd be good to
- * include correlation in this model, but it's not clear how to do
- * that without double-counting cache effects.
+ * For repeated indexscans, scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans. Then
+ * pro-rate the costs for one scan. In this case we assume all the
+ * fetches are random accesses. XXX it'd be good to include
+ * correlation in this model, but it's not clear how to do that
+ * without double-counting cache effects.
*/
double num_scans = outer_rel->rows;
* computed for us by query_planner.
*
* Caller is expected to have ensured that tuples_fetched is greater than zero
- * and rounded to integer (see clamp_row_est). The result will likewise be
+ * and rounded to integer (see clamp_row_est). The result will likewise be
* greater than zero and integral.
*/
double
Assert(T <= total_pages);
/* b is pro-rated share of effective_cache_size */
- b = (double) effective_cache_size * T / total_pages;
+ b = (double) effective_cache_size *T / total_pages;
+
/* force it positive and integral */
if (b <= 1.0)
b = 1.0;
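
The costsize.c comments above refer to the Mackert and Lohman formula for the number of page fetches, with b as the pro-rated share of effective_cache_size and T as the number of pages in the table. A rough, self-contained sketch of that estimate for illustration only; the real function also derives T and the pro-rated b as shown in the hunk above, so the names and sample values here are assumptions:

#include <math.h>
#include <stdio.h>

static double
pages_fetched_estimate(double tuples_fetched, double T, double b)
{
    double pages_fetched;

    /* force b positive and integral */
    if (b <= 1.0)
        b = 1.0;
    else
        b = ceil(b);

    if (T <= b)
    {
        /* whole table fits in the pro-rated cache share */
        pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        if (pages_fetched >= T)
            pages_fetched = T;
        else
            pages_fetched = ceil(pages_fetched);
    }
    else
    {
        /* beyond lim, every extra tuple tends to cost a fresh page fetch */
        double lim = (2.0 * T * b) / (2.0 * T - b);

        if (tuples_fetched <= lim)
            pages_fetched = (2.0 * T * tuples_fetched) / (2.0 * T + tuples_fetched);
        else
            pages_fetched = b + (tuples_fetched - lim) * (T - b) / T;
        pages_fetched = ceil(pages_fetched);
    }
    return pages_fetched;
}

int
main(void)
{
    /* e.g. 1000 tuples fetched from a 500-page table with a 200-page share */
    printf("%g pages\n", pages_fetched_estimate(1000.0, 500.0, 200.0));
    return 0;
}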
if (outer_rel != NULL && outer_rel->rows > 1)
{
/*
- * For repeated bitmap scans, scale up the number of tuples fetched
- * in the Mackert and Lohman formula by the number of scans, so
- * that we estimate the number of pages fetched by all the scans.
- * Then pro-rate for one scan.
+ * For repeated bitmap scans, scale up the number of tuples fetched in
+ * the Mackert and Lohman formula by the number of scans, so that we
+ * estimate the number of pages fetched by all the scans. Then
+ * pro-rate for one scan.
*/
double num_scans = outer_rel->rows;
/*
* For small numbers of pages we should charge random_page_cost apiece,
* while if nearly all the table's pages are being read, it's more
- * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
+ * appropriate to charge seq_page_cost apiece. The effect is nonlinear,
* too. For lack of a better idea, interpolate like this to determine the
* cost per page.
*/
{
/* Each element of the array yields 1 tuple */
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) lfirst(l);
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
ntuples += estimate_array_length(arraynode);
}
Assert(baserel->rtekind == RTE_VALUES);
/*
- * For now, estimate list evaluation cost at one operator eval per
- * list (probably pretty bogus, but is it worth being smarter?)
+ * For now, estimate list evaluation cost at one operator eval per list
+ * (probably pretty bogus, but is it worth being smarter?)
*/
cpu_per_tuple = cpu_operator_cost;
* If the total volume exceeds work_mem, we switch to a tape-style merge
* algorithm. There will still be about t*log2(t) tuple comparisons in
* total, but we will also need to write and read each tuple once per
- * merge pass. We expect about ceil(logM(r)) merge passes where r is the
+ * merge pass. We expect about ceil(logM(r)) merge passes where r is the
* number of initial runs formed and M is the merge order used by tuplesort.c.
* Since the average initial run should be about twice work_mem, we have
* disk traffic = 2 * relsize * ceil(logM(p / (2*work_mem)))
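
The sort-cost comment above gives the external-merge I/O estimate: about 2 * relsize * ceil(logM(r)) bytes, with r initial runs of roughly twice work_mem each and merge order M. A small sketch that just evaluates that formula for sample numbers; the function name and the sample sizes are illustrative, not taken from the planner:

#include <math.h>
#include <stdio.h>

static double
sort_disk_traffic(double relsize, double work_mem, double merge_order)
{
    double nruns = relsize / (2.0 * work_mem);          /* initial runs */
    double npasses = ceil(log(nruns) / log(merge_order)); /* merge passes */

    if (npasses < 1.0)
        npasses = 1.0;
    return 2.0 * relsize * npasses;     /* write + read per pass */
}

int
main(void)
{
    /* e.g. a 1GB relation, 16MB of work_mem, merge order 6 */
    printf("%g bytes\n",
           sort_disk_traffic(1024.0 * 1024 * 1024, 16.0 * 1024 * 1024, 6.0));
    return 0;
}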
* array elements before the answer is determined.
*/
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
total->per_tuple +=
cpu_operator_cost * estimate_array_length(arraynode) * 0.5;
*
* If we are doing an outer join, take that into account: the output must
* be at least as large as the non-nullable input. (Is there any chance
- * of being even smarter?) (XXX this is not really right, because it
+ * of being even smarter?) (XXX this is not really right, because it
* assumes all the restriction clauses are join clauses; we should figure
* pushed-down clauses separately.)
*
Assert(rte->rtekind == RTE_VALUES);
/*
- * Estimate number of rows the values list will return.
- * We know this precisely based on the list length (well,
- * barring set-returning functions in list items, but that's
- * a refinement not catered for anywhere else either).
+ * Estimate number of rows the values list will return. We know this
+ * precisely based on the list length (well, barring set-returning
+ * functions in list items, but that's a refinement not catered for
+ * anywhere else either).
*/
rel->tuples = list_length(rte->values_lists);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.211 2006/07/22 15:41:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.212 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *clauses, List *outer_clauses,
bool istoplevel, RelOptInfo *outer_rel);
static Path *choose_bitmap_and(PlannerInfo *root, RelOptInfo *rel,
- List *paths, RelOptInfo *outer_rel);
+ List *paths, RelOptInfo *outer_rel);
static int bitmap_path_comparator(const void *a, const void *b);
static Cost bitmap_and_cost_est(PlannerInfo *root, RelOptInfo *rel,
- List *paths, RelOptInfo *outer_rel);
+ List *paths, RelOptInfo *outer_rel);
static List *pull_indexpath_quals(Path *bitmapqual);
static bool lists_intersect_ptr(List *list1, List *list2);
static bool match_clause_to_indexcol(IndexOptInfo *index,
Relids outer_relids,
SaOpControl saop_control);
static bool is_indexable_operator(Oid expr_op, Oid opclass,
- bool indexkey_on_left);
+ bool indexkey_on_left);
static bool match_rowcompare_to_indexcol(IndexOptInfo *index,
int indexcol,
Oid opclass,
/*
* 1. Match the index against the available restriction clauses.
* found_clause is set true only if at least one of the current
- * clauses was used (and, if saop_control is SAOP_REQUIRE, it
- * has to have been a ScalarArrayOpExpr clause).
+ * clauses was used (and, if saop_control is SAOP_REQUIRE, it has to
+ * have been a ScalarArrayOpExpr clause).
*/
restrictclauses = group_clauses_by_indexkey(index,
clauses,
ListCell *l;
/*
- * Since find_usable_indexes is relatively expensive, don't bother to
- * run it unless there are some top-level ScalarArrayOpExpr clauses.
+ * Since find_usable_indexes is relatively expensive, don't bother to run
+ * it unless there are some top-level ScalarArrayOpExpr clauses.
*/
foreach(l, clauses)
{
* In theory we should consider every nonempty subset of the given paths.
* In practice that seems like overkill, given the crude nature of the
* estimates, not to mention the possible effects of higher-level AND and
- * OR clauses. As a compromise, we sort the paths by selectivity. We
+ * OR clauses. As a compromise, we sort the paths by selectivity. We
* always take the first, and sequentially add on paths that result in a
* lower estimated cost.
*
* We also make some effort to detect directly redundant input paths, as
- * can happen if there are multiple possibly usable indexes. (Another
- * way it can happen is that best_inner_indexscan will find the same OR
- * join clauses that create_or_index_quals has pulled OR restriction
- * clauses out of, and then both versions show up as duplicate paths.) We
+ * can happen if there are multiple possibly usable indexes. (Another way
+ * it can happen is that best_inner_indexscan will find the same OR join
+ * clauses that create_or_index_quals has pulled OR restriction clauses
+ * out of, and then both versions show up as duplicate paths.) We
* consider an index redundant if any of its index conditions were already
* used by earlier indexes. (We could use predicate_implied_by to have a
* more intelligent, but much more expensive, check --- but in most cases
foreach(cell1, list1)
{
- void *datum1 = lfirst(cell1);
+ void *datum1 = lfirst(cell1);
ListCell *cell2;
foreach(cell2, list2)
* It is also possible to match RowCompareExpr clauses to indexes (but
* currently, only btree indexes handle this). In this routine we will
* report a match if the first column of the row comparison matches the
- * target index column. This is sufficient to guarantee that some index
+ * target index column. This is sufficient to guarantee that some index
* condition can be constructed from the RowCompareExpr --- whether the
* remaining columns match the index too is considered in
* expand_indexqual_rowcompare().
bool plain_op;
/*
- * Never match pseudoconstants to indexes. (Normally this could not
- * happen anyway, since a pseudoconstant clause couldn't contain a
- * Var, but what if someone builds an expression index on a constant?
- * It's not totally unreasonable to do so with a partial index, either.)
+ * Never match pseudoconstants to indexes. (Normally this could not
+ * happen anyway, since a pseudoconstant clause couldn't contain a Var,
+ * but what if someone builds an expression index on a constant? It's not
+ * totally unreasonable to do so with a partial index, either.)
*/
if (rinfo->pseudoconstant)
return false;
* NOTE: because we cache on outer_relids rather than outer_rel->relids,
* we will report the same path and hence path cost for joins with
* different sets of irrelevant rels on the outside. Now that cost_index
- * is sensitive to outer_rel->rows, this is not really right. However
- * the error is probably not large. Is it worth establishing a separate
- * cache entry for each distinct outer_rel->relids set to get this right?
+ * is sensitive to outer_rel->rows, this is not really right. However the
+ * error is probably not large. Is it worth establishing a separate cache
+ * entry for each distinct outer_rel->relids set to get this right?
*/
foreach(l, rel->index_inner_paths)
{
*
* Note: because we include restriction clauses, we will find indexscans
* that could be plain indexscans, ie, they don't require the join context
- * at all. This may seem redundant, but we need to include those scans in
+ * at all. This may seem redundant, but we need to include those scans in
* the input given to choose_bitmap_and() to be sure we find optimal AND
- * combinations of join and non-join scans. Also, even if the "best
- * inner indexscan" is just a plain indexscan, it will have a different
- * cost estimate because of cache effects.
+ * combinations of join and non-join scans. Also, even if the "best inner
+ * indexscan" is just a plain indexscan, it will have a different cost
+ * estimate because of cache effects.
*/
clause_list = find_clauses_for_join(root, rel, outer_relids, isouterjoin);
foreach(l, (List *) lfirst(clausegroup_item))
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
- Expr *clause = rinfo->clause;
+ Expr *clause = rinfo->clause;
/* First check for boolean cases */
if (IsBooleanOpclass(curClass))
{
resultquals = list_concat(resultquals,
expand_indexqual_opclause(rinfo,
- curClass));
+ curClass));
}
else if (IsA(clause, ScalarArrayOpExpr))
{
expand_indexqual_opclause(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
+
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
* column matches) or a simple OpExpr (if the first-column match is all
* there is). In these cases the modified clause is always "<=" or ">="
* even when the original was "<" or ">" --- this is necessary to match all
- * the rows that could match the original. (We are essentially building a
+ * the rows that could match the original. (We are essentially building a
* lossy version of the row comparison when we do this.)
*/
static RestrictInfo *
int indexcol)
{
RowCompareExpr *clause = (RowCompareExpr *) rinfo->clause;
- bool var_on_left;
- int op_strategy;
- Oid op_subtype;
- bool op_recheck;
- int matching_cols;
- Oid expr_op;
- List *opclasses;
- List *subtypes;
- List *new_ops;
- ListCell *largs_cell;
- ListCell *rargs_cell;
- ListCell *opnos_cell;
+ bool var_on_left;
+ int op_strategy;
+ Oid op_subtype;
+ bool op_recheck;
+ int matching_cols;
+ Oid expr_op;
+ List *opclasses;
+ List *subtypes;
+ List *new_ops;
+ ListCell *largs_cell;
+ ListCell *rargs_cell;
+ ListCell *opnos_cell;
/* We have to figure out (again) how the first col matches */
var_on_left = match_index_to_operand((Node *) linitial(clause->largs),
subtypes = list_make1_oid(op_subtype);
/*
- * See how many of the remaining columns match some index column
- * in the same way. A note about rel membership tests: we assume
- * that the clause as a whole is already known to use only Vars from
- * the indexed relation and possibly some acceptable outer relations.
- * So the "other" side of any potential index condition is OK as long
- * as it doesn't use Vars from the indexed relation.
+ * See how many of the remaining columns match some index column in the
+ * same way. A note about rel membership tests: we assume that the clause
+ * as a whole is already known to use only Vars from the indexed relation
+ * and possibly some acceptable outer relations. So the "other" side of
+ * any potential index condition is OK as long as it doesn't use Vars from
+ * the indexed relation.
*/
matching_cols = 1;
largs_cell = lnext(list_head(clause->largs));
break; /* no good, volatile comparison value */
/*
- * The Var side can match any column of the index. If the user
- * does something weird like having multiple identical index
- * columns, we insist the match be on the first such column,
- * to avoid confusing the executor.
+ * The Var side can match any column of the index. If the user does
+ * something weird like having multiple identical index columns, we
+ * insist the match be on the first such column, to avoid confusing
+ * the executor.
*/
for (i = 0; i < index->ncolumns; i++)
{
return rinfo;
/*
- * We have to generate a subset rowcompare (possibly just one OpExpr).
- * The painful part of this is changing < to <= or > to >=, so deal with
- * that first.
+ * We have to generate a subset rowcompare (possibly just one OpExpr). The
+ * painful part of this is changing < to <= or > to >=, so deal with that
+ * first.
*/
if (op_strategy == BTLessEqualStrategyNumber ||
op_strategy == BTGreaterEqualStrategyNumber)
}
else
{
- ListCell *opclasses_cell;
- ListCell *subtypes_cell;
+ ListCell *opclasses_cell;
+ ListCell *subtypes_cell;
if (op_strategy == BTLessStrategyNumber)
op_strategy = BTLessEqualStrategyNumber;
expr_op = get_opclass_member(lfirst_oid(opclasses_cell),
lfirst_oid(subtypes_cell),
op_strategy);
- if (!OidIsValid(expr_op)) /* should not happen */
+ if (!OidIsValid(expr_op)) /* should not happen */
elog(ERROR, "could not find member %d of opclass %u",
op_strategy, lfirst_oid(opclasses_cell));
if (!var_on_left)
{
expr_op = get_commutator(expr_op);
- if (!OidIsValid(expr_op)) /* should not happen */
+ if (!OidIsValid(expr_op)) /* should not happen */
elog(ERROR, "could not find commutator of member %d of opclass %u",
op_strategy, lfirst_oid(opclasses_cell));
}
}
else
{
- Expr *opexpr;
+ Expr *opexpr;
opexpr = make_opclause(linitial_oid(new_ops), BOOLOID, false,
copyObject(linitial(clause->largs)),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.106 2006/08/17 17:06:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.107 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
RelOptInfo *outerrel, RelOptInfo *innerrel,
List *restrictlist, JoinType jointype);
static Path *best_appendrel_indexscan(PlannerInfo *root, RelOptInfo *rel,
- RelOptInfo *outer_rel, JoinType jointype);
+ RelOptInfo *outer_rel, JoinType jointype);
static List *select_mergejoin_clauses(RelOptInfo *joinrel,
RelOptInfo *outerrel,
RelOptInfo *innerrel,
* mergejoin using a subset of the merge clauses. Here, we consider
* both cheap startup cost and cheap total cost. We can ignore
* inner_cheapest_total on the first iteration, since we already made
- * a path with it --- but not on later iterations with shorter
- * sort keys, because then we are considering a different situation,
- * viz using a simpler mergejoin to avoid a sort of the inner rel.
+ * a path with it --- but not on later iterations with shorter sort
+ * keys, because then we are considering a different situation, viz
+ * using a simpler mergejoin to avoid a sort of the inner rel.
*/
num_sortkeys = list_length(innersortkeys);
if (num_sortkeys > 1 && !useallclauses)
* best_appendrel_indexscan
* Finds the best available set of inner indexscans for a nestloop join
* with the given append relation on the inside and the given outer_rel
- * outside. Returns an AppendPath comprising the best inner scans, or
+ * outside. Returns an AppendPath comprising the best inner scans, or
* NULL if there are no possible inner indexscans.
*/
static Path *
Assert(childrel->reloptkind == RELOPT_OTHER_MEMBER_REL);
/*
- * Check to see if child was rejected by constraint exclusion.
- * If so, it will have a cheapest_total_path that's an Append path
- * with no members (see set_plain_rel_pathlist).
+ * Check to see if child was rejected by constraint exclusion. If so,
+ * it will have a cheapest_total_path that's an Append path with no
+ * members (see set_plain_rel_pathlist).
*/
if (IsA(childrel->cheapest_total_path, AppendPath) &&
((AppendPath *) childrel->cheapest_total_path)->subpaths == NIL)
outer_rel, jointype);
/*
- * If no luck on an indexpath for this rel, we'll still consider
- * an Append substituting the cheapest-total inner path. However
- * we must find at least one indexpath, else there's not going to
- * be any improvement over the base path for the appendrel.
+ * If no luck on an indexpath for this rel, we'll still consider an
+ * Append substituting the cheapest-total inner path. However we must
+ * find at least one indexpath, else there's not going to be any
+ * improvement over the base path for the appendrel.
*/
if (bestinnerjoin)
found_indexscan = true;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.79 2006/03/05 15:58:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.80 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* An exception occurs when there is a clauseless join inside a
- * construct that restricts join order, i.e., an outer join RHS
- * or an IN (sub-SELECT) construct. Here, the rel may well have
- * join clauses against stuff outside the OJ RHS or IN sub-SELECT,
- * but the clauseless join *must* be done before we can make use
- * of those join clauses. So do the clauseless join bit.
+ * construct that restricts join order, i.e., an outer join RHS or
+ * an IN (sub-SELECT) construct. Here, the rel may well have join
+ * clauses against stuff outside the OJ RHS or IN sub-SELECT, but
+ * the clauseless join *must* be done before we can make use of
+ * those join clauses. So do the clauseless join bit.
*
* See also the last-ditch case below.
*/
joinrelids = bms_union(rel1->relids, rel2->relids);
/*
- * If we have any outer joins, the proposed join might be illegal; and
- * in any case we have to determine its join type. Scan the OJ list
- * for conflicts.
+ * If we have any outer joins, the proposed join might be illegal; and in
+ * any case we have to determine its join type. Scan the OJ list for
+ * conflicts.
*/
jointype = JOIN_INNER; /* default if no match to an OJ */
is_valid_inner = true;
InClauseInfo *ininfo = (InClauseInfo *) lfirst(l);
/*
- * This IN clause is not relevant unless its RHS overlaps the
- * proposed join. (Check this first as a fast path for dismissing
- * most irrelevant INs quickly.)
+ * This IN clause is not relevant unless its RHS overlaps the proposed
+ * join. (Check this first as a fast path for dismissing most
+ * irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
/*
- * If we are still building the IN clause's RHS, then this IN
- * clause isn't relevant yet.
+ * If we are still building the IN clause's RHS, then this IN clause
+ * isn't relevant yet.
*/
if (bms_is_subset(joinrelids, ininfo->righthand))
continue;
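Editor's note: both relevance tests above are Bitmapset operations. A self-contained sketch using a single machine word as a stand-in for Bitmapset (the real bms_overlap/bms_is_subset handle arbitrarily wide sets); the relid assignments are invented.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t relid_set;     /* stand-in for Bitmapset *, relids 0..63 */

    static bool
    set_overlap(relid_set a, relid_set b)
    {
        return (a & b) != 0;
    }

    static bool
    set_is_subset(relid_set a, relid_set b)
    {
        return (a & ~b) == 0;       /* no member of a lies outside b */
    }

    int
    main(void)
    {
        relid_set joinrelids = (1u << 2) | (1u << 5);   /* proposed join: {2,5} */
        relid_set in_rhs = (1u << 5) | (1u << 7);       /* IN clause RHS: {5,7} */

        /* irrelevant unless the RHS overlaps the proposed join */
        printf("RHS overlaps join: %d\n", set_overlap(in_rhs, joinrelids));
        /* still building the RHS if the join lies wholly inside it */
        printf("join inside RHS:   %d\n", set_is_subset(joinrelids, in_rhs));
        return 0;
    }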
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.80 2006/07/14 14:52:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.81 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Use the generate_bitmap_or_paths() machinery to estimate the
* value of each OR clause. We can use regular restriction
* clauses along with the OR clause contents to generate
- * indexquals. We pass outer_rel = NULL so that sub-clauses
- * that are actually joins will be ignored.
+ * indexquals. We pass outer_rel = NULL so that sub-clauses that
+ * are actually joins will be ignored.
*/
List *orpaths;
ListCell *k;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.78 2006/08/17 17:02:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.79 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* representing a backwards scan of the index. Return NIL if can't do it.
*
* If 'canonical' is TRUE, we remove duplicate pathkeys (which can occur
- * if two index columns are equijoined, eg WHERE x = 1 AND y = 1). This
+ * if two index columns are equijoined, eg WHERE x = 1 AND y = 1). This
* is required if the result is to be compared directly to a canonical query
* pathkeys list. However, some callers want a list with exactly one entry
* per index column, and they must pass FALSE.
outer_expr = (Node *)
makeRelabelType((Expr *) outer_expr,
((RelabelType *) sub_key)->resulttype,
- ((RelabelType *) sub_key)->resulttypmod,
- ((RelabelType *) sub_key)->relabelformat);
+ ((RelabelType *) sub_key)->resulttypmod,
+ ((RelabelType *) sub_key)->relabelformat);
}
else
continue;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.27 2006/03/05 15:58:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/tidpath.c,v 1.28 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* CTID must be first argument */
if (arg1 && IsA(arg1, Var))
{
- Var *var = (Var *) arg1;
+ Var *var = (Var *) arg1;
if (var->varattno == SelfItemPointerAttributeNumber &&
var->vartype == TIDOID &&
{
foreach(l, ((BoolExpr *) expr)->args)
{
- List *frtn = TidQualFromExpr((Node *) lfirst(l), varno);
+ List *frtn = TidQualFromExpr((Node *) lfirst(l), varno);
if (frtn)
rlst = list_concat(rlst, frtn);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.216 2006/08/02 01:59:45 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.217 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static FunctionScan *create_functionscan_plan(PlannerInfo *root, Path *best_path,
List *tlist, List *scan_clauses);
static ValuesScan *create_valuesscan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static NestLoop *create_nestloop_plan(PlannerInfo *root, NestPath *best_path,
Plan *outer_plan, Plan *inner_plan);
static MergeJoin *create_mergejoin_plan(PlannerInfo *root, MergePath *best_path,
static FunctionScan *make_functionscan(List *qptlist, List *qpqual,
Index scanrelid);
static ValuesScan *make_valuesscan(List *qptlist, List *qpqual,
- Index scanrelid);
+ Index scanrelid);
static BitmapAnd *make_bitmap_and(List *bitmapplans);
static BitmapOr *make_bitmap_or(List *bitmapplans);
static NestLoop *make_nestloop(List *tlist,
tlist = build_relation_tlist(rel);
/*
- * Extract the relevant restriction clauses from the parent relation.
- * The executor must apply all these restrictions during the scan,
- * except for pseudoconstants which we'll take care of below.
+ * Extract the relevant restriction clauses from the parent relation. The
+ * executor must apply all these restrictions during the scan, except for
+ * pseudoconstants which we'll take care of below.
*/
scan_clauses = rel->baserestrictinfo;
}
/*
- * If there are any pseudoconstant clauses attached to this node,
- * insert a gating Result node that evaluates the pseudoconstants
- * as one-time quals.
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
*/
if (root->hasPseudoConstantQuals)
plan = create_gating_plan(root, plan, scan_clauses);
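Editor's note: a toy illustration of the split described in the comment above: clauses that contain no Vars and no volatile functions are pseudoconstant and become one-time quals on a gating Result node, while ordinary clauses stay on the scan. The structure and the sample clauses below are invented for the example; this is not the real create_gating_plan.

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct ExampleQual
    {
        const char *text;
        bool        pseudoconstant; /* no Vars, no volatile functions */
    } ExampleQual;

    int
    main(void)
    {
        ExampleQual quals[] = {
            {"t.x > 10", false},        /* references the scanned relation */
            {"$1 IS NOT NULL", true},   /* constant for the whole execution */
        };
        int         nquals = sizeof(quals) / sizeof(quals[0]);
        int         i;

        for (i = 0; i < nquals; i++)
        {
            if (quals[i].pseudoconstant)
                printf("one-time qual on gating Result: %s\n", quals[i].text);
            else
                printf("ordinary scan qual:             %s\n", quals[i].text);
        }
        return 0;
    }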
int i;
/*
- * We can do this for real relation scans, subquery scans, function
- * scans, and values scans (but not for, eg, joins).
+ * We can do this for real relation scans, subquery scans, function scans,
+ * and values scans (but not for, eg, joins).
*/
if (rel->rtekind != RTE_RELATION &&
rel->rtekind != RTE_SUBQUERY &&
}
/*
- * If there are any pseudoconstant clauses attached to this node,
- * insert a gating Result node that evaluates the pseudoconstants
- * as one-time quals.
+ * If there are any pseudoconstant clauses attached to this node, insert a
+ * gating Result node that evaluates the pseudoconstants as one-time
+ * quals.
*/
if (root->hasPseudoConstantQuals)
plan = create_gating_plan(root, plan, best_path->joinrestrictinfo);
*
* Unlike create_indexscan_plan(), we need take no special thought here
* for partial index predicates; this is because the predicate conditions
- * are already listed in bitmapqualorig and indexquals. Bitmap scans
- * have to do it that way because predicate conditions need to be rechecked
- * if the scan becomes lossy.
+ * are already listed in bitmapqualorig and indexquals. Bitmap scans have
+ * to do it that way because predicate conditions need to be rechecked if
+ * the scan becomes lossy.
*/
qpqual = NIL;
foreach(l, scan_clauses)
subindexquals = lappend(subindexquals,
make_ands_explicit(subindexqual));
}
+
/*
* In the presence of ScalarArrayOpExpr quals, we might have built
* BitmapOrPaths with just one subpath; don't add an OR step.
plan->total_cost = opath->path.total_cost;
plan->plan_rows =
clamp_row_est(opath->bitmapselectivity * opath->path.parent->tuples);
- plan->plan_width = 0; /* meaningless */
+ plan->plan_width = 0; /* meaningless */
}
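Editor's note: the row estimate just above is the bitmap selectivity times the parent relation's tuple count, passed through clamp_row_est(). A standalone sketch modeled on that helper (round to an integer, never report fewer than one row); the input numbers are invented.

    #include <math.h>
    #include <stdio.h>

    /* Modeled on the backend's clamp_row_est(): round, and force >= 1 row */
    static double
    clamp_rows(double nrows)
    {
        if (nrows <= 1.0)
            return 1.0;
        return rint(nrows);
    }

    int
    main(void)
    {
        double bitmapselectivity = 0.0004;  /* invented */
        double parent_tuples = 250000.0;    /* invented */

        printf("plan_rows = %.0f\n",
               clamp_rows(bitmapselectivity * parent_tuples));
        return 0;                   /* prints 100 */
    }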
/*
Expr *pred = (Expr *) lfirst(l);
/*
- * We know that the index predicate must have been implied by
- * the query condition as a whole, but it may or may not be
- * implied by the conditions that got pushed into the
- * bitmapqual. Avoid generating redundant conditions.
+ * We know that the index predicate must have been implied by the
+ * query condition as a whole, but it may or may not be implied by
+ * the conditions that got pushed into the bitmapqual. Avoid
+ * generating redundant conditions.
*/
if (!predicate_implied_by(list_make1(pred), ipath->indexclauses))
{
scan_clauses = extract_actual_clauses(scan_clauses, false);
/*
- * Remove any clauses that are TID quals. This is a bit tricky since
- * the tidquals list has implicit OR semantics.
+ * Remove any clauses that are TID quals. This is a bit tricky since the
+ * tidquals list has implicit OR semantics.
*/
ortidquals = best_path->tidquals;
if (list_length(ortidquals) > 1)
*/
static ValuesScan *
create_valuesscan_plan(PlannerInfo *root, Path *best_path,
- List *tlist, List *scan_clauses)
+ List *tlist, List *scan_clauses)
{
ValuesScan *scan_plan;
Index scan_relid = best_path->parent->relid;
* join quals; failing to prove that doesn't result in an incorrect
* plan. It is the right way to proceed because adding more quals to
* the stuff we got from the original query would just make it harder
- * to detect duplication. (Also, to change this we'd have to be
- * wary of UPDATE/DELETE/SELECT FOR UPDATE target relations; see
- * notes above about EvalPlanQual.)
+ * to detect duplication. (Also, to change this we'd have to be wary
+ * of UPDATE/DELETE/SELECT FOR UPDATE target relations; see notes
+ * above about EvalPlanQual.)
*/
BitmapHeapPath *innerpath = (BitmapHeapPath *) best_path->innerjoinpath;
if (IsA(clause, OpExpr))
{
- OpExpr *op = (OpExpr *) clause;
+ OpExpr *op = (OpExpr *) clause;
if (list_length(op->args) != 2)
elog(ERROR, "indexqual clause is not binary opclause");
else if (IsA(clause, RowCompareExpr))
{
RowCompareExpr *rc = (RowCompareExpr *) clause;
- ListCell *lc;
+ ListCell *lc;
/*
* Check to see if the indexkey is on the right; if so, commute
* attribute this is and change the indexkey operand as needed.
*
* Save the index opclass for only the first column. We will
- * return the operator and opclass info for just the first
- * column of the row comparison; the executor will have to
- * look up the rest if it needs them.
+ * return the operator and opclass info for just the first column
+ * of the row comparison; the executor will have to look up the
+ * rest if it needs them.
*/
foreach(lc, rc->largs)
{
- Oid tmp_opclass;
+ Oid tmp_opclass;
lfirst(lc) = fix_indexqual_operand(lfirst(lc),
index,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.122 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.123 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void add_vars_to_targetlist(PlannerInfo *root, List *vars,
Relids where_needed);
static List *deconstruct_recurse(PlannerInfo *root, Node *jtnode,
- bool below_outer_join, Relids *qualscope);
+ bool below_outer_join, Relids *qualscope);
static OuterJoinInfo *make_outerjoininfo(PlannerInfo *root,
- Relids left_rels, Relids right_rels,
- bool is_full_join, Node *clause);
+ Relids left_rels, Relids right_rels,
+ bool is_full_join, Node *clause);
static void distribute_qual_to_rels(PlannerInfo *root, Node *clause,
bool is_pushed_down,
bool is_deduced,
* appearing in the jointree.
*
* The initial invocation must pass root->parse->jointree as the value of
- * jtnode. Internally, the function recurses through the jointree.
+ * jtnode. Internally, the function recurses through the jointree.
*
* At the end of this process, there should be one baserel RelOptInfo for
* every non-join RTE that is used in the query. Therefore, this routine
* is the only place that should call build_simple_rel with reloptkind
- * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
+ * RELOPT_BASEREL. (Note: build_simple_rel recurses internally to build
* "other rel" RelOptInfos for the members of any appendrels we find here.)
*/
void
* deconstruct_jointree
* Recursively scan the query's join tree for WHERE and JOIN/ON qual
* clauses, and add these to the appropriate restrictinfo and joininfo
- * lists belonging to base RelOptInfos. Also, add OuterJoinInfo nodes
+ * lists belonging to base RelOptInfos. Also, add OuterJoinInfo nodes
* to root->oj_info_list for any outer joins appearing in the query tree.
* Return a "joinlist" data structure showing the join order decisions
* that need to be made by make_one_rel().
* be evaluated at the lowest level where all the variables it mentions are
* available. However, we cannot push a qual down into the nullable side(s)
* of an outer join since the qual might eliminate matching rows and cause a
- * NULL row to be incorrectly emitted by the join. Therefore, we artificially
+ * NULL row to be incorrectly emitted by the join. Therefore, we artificially
* OR the minimum-relids of such an outer join into the required_relids of
- * clauses appearing above it. This forces those clauses to be delayed until
+ * clauses appearing above it. This forces those clauses to be delayed until
* application of the outer join (or maybe even higher in the join tree).
*/
List *
ListCell *l;
/*
- * First, recurse to handle child joins. We collapse subproblems
- * into a single joinlist whenever the resulting joinlist wouldn't
- * exceed from_collapse_limit members. Also, always collapse
- * one-element subproblems, since that won't lengthen the joinlist
- * anyway.
+ * First, recurse to handle child joins. We collapse subproblems into
+ * a single joinlist whenever the resulting joinlist wouldn't exceed
+ * from_collapse_limit members. Also, always collapse one-element
+ * subproblems, since that won't lengthen the joinlist anyway.
*/
*qualscope = NULL;
joinlist = NIL;
remaining = list_length(f->fromlist);
foreach(l, f->fromlist)
{
- Relids sub_qualscope;
- List *sub_joinlist;
- int sub_members;
+ Relids sub_qualscope;
+ List *sub_joinlist;
+ int sub_members;
sub_joinlist = deconstruct_recurse(root, lfirst(l),
below_outer_join,
(list_length(leftjoinlist) + list_length(rightjoinlist) <=
join_collapse_limit))
joinlist = list_concat(leftjoinlist, rightjoinlist);
- else /* force the join order at this node */
+ else
+ /* force the join order at this node */
joinlist = list_make1(list_make2(leftjoinlist, rightjoinlist));
}
else
* any nullable rel is FOR UPDATE/SHARE.
*
* You might be wondering why this test isn't made far upstream in the
- * parser. It's because the parser hasn't got enough info --- consider
- * FOR UPDATE applied to a view. Only after rewriting and flattening
- * do we know whether the view contains an outer join.
+ * parser. It's because the parser hasn't got enough info --- consider
+ * FOR UPDATE applied to a view. Only after rewriting and flattening do
+ * we know whether the view contains an outer join.
*/
foreach(l, root->parse->rowMarks)
{
{
ojinfo->min_lefthand = left_rels;
ojinfo->min_righthand = right_rels;
- ojinfo->lhs_strict = false; /* don't care about this */
+ ojinfo->lhs_strict = false; /* don't care about this */
return ojinfo;
}
ojinfo->lhs_strict = bms_overlap(strict_relids, left_rels);
/*
- * Required LHS is basically the LHS rels mentioned in the clause...
- * but if there aren't any, punt and make it the full LHS, to avoid
- * having an empty min_lefthand which will confuse later processing.
- * (We don't try to be smart about such cases, just correct.)
- * We may have to add more rels based on lower outer joins; see below.
+ * Required LHS is basically the LHS rels mentioned in the clause... but
+ * if there aren't any, punt and make it the full LHS, to avoid having an
+ * empty min_lefthand which will confuse later processing. (We don't try
+ * to be smart about such cases, just correct.) We may have to add more
+ * rels based on lower outer joins; see below.
*/
ojinfo->min_lefthand = bms_intersect(clause_relids, left_rels);
if (bms_is_empty(ojinfo->min_lefthand))
ojinfo->min_lefthand = bms_copy(left_rels);
/*
- * Required RHS is normally the full set of RHS rels. Sometimes we
- * can exclude some, see below.
+ * Required RHS is normally the full set of RHS rels. Sometimes we can
+ * exclude some, see below.
*/
ojinfo->min_righthand = bms_copy(right_rels);
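Editor's note: a word-sized sketch of the min_lefthand/min_righthand computation described above, using plain bit masks in place of Bitmapsets. The relid assignments are invented; the fallback to the full LHS when the intersection is empty follows the comment.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t relid_set;     /* stand-in for Bitmapset *, relids 0..31 */

    int
    main(void)
    {
        relid_set left_rels = (1u << 1) | (1u << 2);        /* outer join LHS {1,2} */
        relid_set right_rels = (1u << 3);                   /* outer join RHS {3}   */
        relid_set clause_relids = (1u << 2) | (1u << 3);    /* ON clause uses {2,3} */

        /* minimum LHS: the LHS rels the clause mentions, else the whole LHS */
        relid_set min_lefthand = clause_relids & left_rels;

        if (min_lefthand == 0)
            min_lefthand = left_rels;

        /* minimum RHS: normally the full RHS */
        relid_set min_righthand = right_rels;

        printf("min_lefthand  = 0x%x (rel 2)\n", (unsigned int) min_lefthand);
        printf("min_righthand = 0x%x (rel 3)\n", (unsigned int) min_righthand);
        return 0;
    }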
ojinfo->min_lefthand = bms_add_members(ojinfo->min_lefthand,
otherinfo->min_righthand);
}
+
/*
* For a lower OJ in our RHS, if our join condition does not use the
* lower join's RHS and the lower OJ's join condition is strict, we
* If the clause is an outer-join clause, we must force it to the OJ's
* semantic level to preserve semantics.
*
- * Otherwise, when the clause contains volatile functions, we force it
- * to be evaluated at its original syntactic level. This preserves the
+ * Otherwise, when the clause contains volatile functions, we force it to
+ * be evaluated at its original syntactic level. This preserves the
* expected semantics.
*
- * When the clause contains no volatile functions either, it is actually
- * a pseudoconstant clause that will not change value during any one
- * execution of the plan, and hence can be used as a one-time qual in
- * a gating Result plan node. We put such a clause into the regular
+ * When the clause contains no volatile functions either, it is actually a
+ * pseudoconstant clause that will not change value during any one
+ * execution of the plan, and hence can be used as a one-time qual in a
+ * gating Result plan node. We put such a clause into the regular
* RestrictInfo lists for the moment, but eventually createplan.c will
* pull it out and make a gating Result node immediately above whatever
- * plan node the pseudoconstant clause is assigned to. It's usually
- * best to put a gating node as high in the plan tree as possible.
- * If we are not below an outer join, we can actually push the
- * pseudoconstant qual all the way to the top of the tree. If we are
- * below an outer join, we leave the qual at its original syntactic level
- * (we could push it up to just below the outer join, but that seems more
- * complex than it's worth).
+ * plan node the pseudoconstant clause is assigned to. It's usually best
+ * to put a gating node as high in the plan tree as possible. If we are
+ * not below an outer join, we can actually push the pseudoconstant qual
+ * all the way to the top of the tree. If we are below an outer join, we
+ * leave the qual at its original syntactic level (we could push it up to
+ * just below the outer join, but that seems more complex than it's
+ * worth).
*/
if (bms_is_empty(relids))
{
* Mark the qual as "pushed down" if it can be applied at a level below
* its original syntactic level. This allows us to distinguish original
* JOIN/ON quals from higher-level quals pushed down to the same joinrel.
- * A qual originating from WHERE is always considered "pushed down".
- * Note that for an outer-join qual, we have to compare to ojscope not
+ * A qual originating from WHERE is always considered "pushed down". Note
+ * that for an outer-join qual, we have to compare to ojscope not
* qualscope.
*/
if (!is_pushed_down)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.21 2006/08/12 02:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planagg.c,v 1.22 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
Aggref *aggref = (Aggref *) node;
Oid aggsortop;
- Expr *curTarget;
+ Expr *curTarget;
MinMaxAggInfo *info;
ListCell *l;
subparse->limitOffset = NULL;
subparse->limitCount = (Node *) makeConst(INT8OID, sizeof(int64),
Int64GetDatum(1),
- false, false /* not by val */);
+ false, false /* not by val */ );
/*
* Generate the plan for the subquery. We already have a Path for the
* in our cost estimates. But that looks painful, and in most cases the
* fraction of NULLs isn't high enough to change the decision.
*
- * The NOT NULL qual has to go on the actual indexscan; create_plan
- * might have stuck a gating Result atop that, if there were any
- * pseudoconstant quals.
+ * The NOT NULL qual has to go on the actual indexscan; create_plan might
+ * have stuck a gating Result atop that, if there were any pseudoconstant
+ * quals.
*/
plan = create_plan(&subroot, (Path *) info->path);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.96 2006/09/19 22:49:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planmain.c,v 1.97 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Construct RelOptInfo nodes for all base relations in query, and
* indirectly for all appendrel member relations ("other rels"). This
- * will give us a RelOptInfo for every "simple" (non-join) rel involved
- * in the query.
+ * will give us a RelOptInfo for every "simple" (non-join) rel involved in
+ * the query.
*
* Note: the reason we find the rels by searching the jointree and
* appendrel list, rather than just scanning the rangetable, is that the
add_base_rels_to_query(root, (Node *) parse->jointree);
/*
- * We should now have size estimates for every actual table involved
- * in the query, so we can compute total_table_pages. Note that
- * appendrels are not double-counted here, even though we don't bother
- * to distinguish RelOptInfos for appendrel parents, because the parents
- * will still have size zero.
+ * We should now have size estimates for every actual table involved in
+ * the query, so we can compute total_table_pages. Note that appendrels
+ * are not double-counted here, even though we don't bother to distinguish
+ * RelOptInfos for appendrel parents, because the parents will still have
+ * size zero.
*
* XXX if a table is self-joined, we will count it once per appearance,
* which perhaps is the wrong thing ... but that's not completely clear,
if (brel == NULL)
continue;
- Assert(brel->relid == rti); /* sanity check on array */
+ Assert(brel->relid == rti); /* sanity check on array */
total_pages += (double) brel->pages;
}
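Editor's note: a minimal illustration of the page-count accumulation above. The planner's rel array is indexed by rangetable index and contains NULL holes (which the loop skips); the structure and values below are invented.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct ExampleRel
    {
        int     relid;
        double  pages;
    } ExampleRel;

    int
    main(void)
    {
        ExampleRel  a = {1, 120.0};
        ExampleRel  b = {3, 4500.0};
        /* indexed by RT index; join RTEs leave NULL holes, slot 0 is unused */
        ExampleRel *rel_array[] = {NULL, &a, NULL, &b};
        double      total_pages = 0.0;
        int         rti;

        for (rti = 1; rti < 4; rti++)
        {
            if (rel_array[rti] == NULL)
                continue;
            total_pages += rel_array[rti]->pages;
        }
        printf("total_table_pages = %.0f\n", total_pages);
        return 0;
    }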
* Examine the targetlist and qualifications, adding entries to baserel
* targetlists for all referenced Vars. Restrict and join clauses are
* added to appropriate lists belonging to the mentioned relations. We
- * also build lists of equijoined keys for pathkey construction, and
- * form a target joinlist for make_one_rel() to work from.
+ * also build lists of equijoined keys for pathkey construction, and form
+ * a target joinlist for make_one_rel() to work from.
*
* Note: all subplan nodes will have "flat" (var-only) tlists. This
* implies that all expression evaluations are done at the root of the
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.208 2006/08/12 02:52:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.209 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If the query has any join RTEs, replace join alias variables with
* base-relation variables. We must do this before sublink processing,
- * else sublinks expanded out from join aliases wouldn't get processed.
- * We can skip it in VALUES lists, however, since they can't contain
- * any Vars at all.
+ * else sublinks expanded out from join aliases wouldn't get processed. We
+ * can skip it in VALUES lists, however, since they can't contain any Vars
+ * at all.
*/
if (root->hasJoinRTEs && kind != EXPRKIND_VALUES)
expr = flatten_join_alias_vars(root, expr);
* still must do it for quals (to get AND/OR flatness); and if we are in a
* subquery we should not assume it will be done only once.
*
- * For VALUES lists we never do this at all, again on the grounds that
- * we should optimize for one-time evaluation.
+ * For VALUES lists we never do this at all, again on the grounds that we
+ * should optimize for one-time evaluation.
*/
if (kind != EXPRKIND_VALUES &&
(root->parse->jointree->fromlist != NIL ||
subplan = grouping_planner(&subroot, 0.0 /* retrieve all tuples */ );
/*
- * If this child rel was excluded by constraint exclusion, exclude
- * it from the plan.
+ * If this child rel was excluded by constraint exclusion, exclude it
+ * from the plan.
*/
if (is_dummy_plan(subplan))
continue;
/*
* Deal with the RETURNING clause if any. It's convenient to pass the
- * returningList through setrefs.c now rather than at top level (if
- * we waited, handling inherited UPDATE/DELETE would be much harder).
+ * returningList through setrefs.c now rather than at top level (if we
+ * waited, handling inherited UPDATE/DELETE would be much harder).
*/
if (parse->returningList)
{
- List *rlist;
+ List *rlist;
rlist = set_returning_clause_references(parse->returningList,
result_plan,
{
if (IsA(plan, Result))
{
- List *rcqual = (List *) ((Result *) plan)->resconstantqual;
+ List *rcqual = (List *) ((Result *) plan)->resconstantqual;
if (list_length(rcqual) == 1)
{
- Const *constqual = (Const *) linitial(rcqual);
+ Const *constqual = (Const *) linitial(rcqual);
if (constqual && IsA(constqual, Const))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.125 2006/08/28 14:32:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.126 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return false; /* tlist doesn't match junk status */
/*
- * We accept either a Var referencing the corresponding element of
- * the subplan tlist, or a Const equaling the subplan element.
- * See generate_setop_tlist() for motivation.
+ * We accept either a Var referencing the corresponding element of the
+ * subplan tlist, or a Const equaling the subplan element. See
+ * generate_setop_tlist() for motivation.
*/
if (ptle->expr && IsA(ptle->expr, Var))
{
- Var *var = (Var *) ptle->expr;
+ Var *var = (Var *) ptle->expr;
Assert(var->varno == plan->scan.scanrelid);
Assert(var->varlevelsup == 0);
*
* To handle bitmap-scan plan trees, we have to be able to recurse down
* to the bottom BitmapIndexScan nodes; likewise, appendrel indexscans
- * require recursing through Append nodes. This is split out as a separate
+ * require recursing through Append nodes. This is split out as a separate
* function so that it can recurse.
*/
static void
* adjust any Vars that refer to other tables to reference junk tlist
* entries in the top plan's targetlist. Vars referencing the result
* table should be left alone, however (the executor will evaluate them
- * using the actual heap tuple, after firing triggers if any). In the
+ * using the actual heap tuple, after firing triggers if any). In the
* adjusted RETURNING list, result-table Vars will still have their
* original varno, but Vars for other rels will have varno OUTER.
*
/*
* We can perform the desired Var fixup by abusing the join_references
- * machinery that normally handles inner indexscan fixup. We search
- * the top plan's targetlist for Vars of non-result relations, and use
+ * machinery that normally handles inner indexscan fixup. We search the
+ * top plan's targetlist for Vars of non-result relations, and use
* join_references to convert RETURNING Vars into references to those
* tlist entries, while leaving result-rel Vars as-is.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.111 2006/08/02 01:59:46 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.112 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *convert_testexpr(Node *testexpr,
- int rtindex,
- List **righthandIds);
+ int rtindex,
+ List **righthandIds);
static Node *convert_testexpr_mutator(Node *node,
- convert_testexpr_context *context);
+ convert_testexpr_context *context);
static bool subplan_is_hashable(SubLink *slink, SubPlan *node);
static bool hash_ok_operator(OpExpr *expr);
static Node *replace_correlation_vars_mutator(Node *node, void *context);
return NULL;
if (IsA(node, Param))
{
- Param *param = (Param *) node;
+ Param *param = (Param *) node;
if (param->paramkind == PARAM_SUBLINK)
{
/*
- * We expect to encounter the Params in column-number sequence.
- * We could handle non-sequential order if necessary, but for now
+ * We expect to encounter the Params in column-number sequence. We
+ * could handle non-sequential order if necessary, but for now
* there's no need. (This is also a useful cross-check that we
* aren't finding any unexpected Params.)
*/
if (context->rtindex)
{
/* Make the Var node representing the subplan's result */
- Var *newvar;
+ Var *newvar;
newvar = makeVar(context->rtindex,
param->paramid,
param->paramtype,
-1,
0);
+
/*
* Copy it for caller. NB: we need a copy to avoid having
* doubly-linked substructure in the modified parse tree.
return false;
/*
- * The estimated size of the subquery result must fit in work_mem.
- * (Note: we use sizeof(HeapTupleHeaderData) here even though the tuples
- * will actually be stored as MinimalTuples; this provides some fudge
- * factor for hashtable overhead.)
+ * The estimated size of the subquery result must fit in work_mem. (Note:
+ * we use sizeof(HeapTupleHeaderData) here even though the tuples will
+ * actually be stored as MinimalTuples; this provides some fudge factor
+ * for hashtable overhead.)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleHeaderData)));
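Editor's note: a self-contained sketch of the work_mem test above. MAXALIGN is reproduced in simplified form; the 8-byte alignment, 23-byte header size, row count, width, and work_mem value are all stand-ins for what the backend derives from its own configuration.

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified MAXALIGN; 8-byte alignment is an illustrative assumption */
    #define EXAMPLE_ALIGNOF 8
    #define EXAMPLE_MAXALIGN(len) \
        (((size_t) (len) + (EXAMPLE_ALIGNOF - 1)) & ~((size_t) (EXAMPLE_ALIGNOF - 1)))

    int
    main(void)
    {
        double  plan_rows = 10000.0;    /* estimated result rows (invented) */
        int     plan_width = 44;        /* estimated row width (invented) */
        size_t  tuple_header = 23;      /* stand-in for sizeof(HeapTupleHeaderData) */
        long    work_mem_bytes = 1024L * 1024L;     /* work_mem = 1MB (invented) */

        double  subquery_size = plan_rows *
            (EXAMPLE_MAXALIGN(plan_width) + EXAMPLE_MAXALIGN(tuple_header));

        printf("estimated %.0f bytes vs limit %ld: %s\n",
               subquery_size, work_mem_bytes,
               subquery_size > work_mem_bytes ? "not hashable" : "hashable");
        return 0;
    }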
{
foreach(l, ((BoolExpr *) slink->testexpr)->args)
{
- Node *andarg = (Node *) lfirst(l);
+ Node *andarg = (Node *) lfirst(l);
if (!IsA(andarg, OpExpr))
return false; /* probably can't happen */
return NULL;
if (sublink->testexpr && IsA(sublink->testexpr, OpExpr))
{
- List *opclasses;
- List *opstrats;
+ List *opclasses;
+ List *opstrats;
get_op_btree_interpretation(((OpExpr *) sublink->testexpr)->opno,
&opclasses, &opstrats);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.43 2006/08/19 02:48:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.44 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
} reduce_outer_joins_state;
static Node *pull_up_simple_subquery(PlannerInfo *root, Node *jtnode,
- RangeTblEntry *rte,
- bool below_outer_join,
- bool append_rel_member);
+ RangeTblEntry *rte,
+ bool below_outer_join,
+ bool append_rel_member);
static Node *pull_up_simple_union_all(PlannerInfo *root, Node *jtnode,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
static void pull_up_union_leaf_queries(Node *setOp, PlannerInfo *root,
- int parentRTindex, Query *setOpQuery);
+ int parentRTindex, Query *setOpQuery);
static void make_setop_translation_lists(Query *query,
Index newvarno,
List **col_mappings, List **translated_vars);
static bool is_simple_subquery(Query *subquery);
static bool is_simple_union_all(Query *subquery);
static bool is_simple_union_all_recurse(Node *setOp, Query *setOpQuery,
- List *colTypes);
+ List *colTypes);
static bool has_nullable_targetlist(Query *subquery);
static bool is_safe_append_member(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno,
static void fix_in_clause_relids(List *in_info_list, int varno,
Relids subrelids);
static void fix_append_rel_relids(List *append_rel_list, int varno,
- Relids subrelids);
+ Relids subrelids);
static Node *find_jointree_node_for_rel(Node *jtnode, int relid);
* side of an outer join. This restricts what we can do.
*
* append_rel_member is true if we are looking at a member subquery of
- * an append relation. This puts some different restrictions on what
+ * an append relation. This puts some different restrictions on what
* we can do.
*
* A tricky aspect of this code is that if we pull up a subquery we have
* variables evaluated at the right place in the modified plan tree.
* Fix it someday.
*
- * If we are looking at an append-relation member, we can't pull
- * it up unless is_safe_append_member says so.
+ * If we are looking at an append-relation member, we can't pull it up
+ * unless is_safe_append_member says so.
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_subquery(rte->subquery) &&
/*
* Alternatively, is it a simple UNION ALL subquery? If so, flatten
- * into an "append relation". We can do this regardless of nullability
- * considerations since this transformation does not result in
- * propagating non-Var expressions into upper levels of the query.
+ * into an "append relation". We can do this regardless of
+ * nullability considerations since this transformation does not
+ * result in propagating non-Var expressions into upper levels of the
+ * query.
*
* It's also safe to do this regardless of whether this query is
- * itself an appendrel member. (If you're thinking we should try
- * to flatten the two levels of appendrel together, you're right;
- * but we handle that in set_append_rel_pathlist, not here.)
+ * itself an appendrel member. (If you're thinking we should try to
+ * flatten the two levels of appendrel together, you're right; but we
+ * handle that in set_append_rel_pathlist, not here.)
*/
if (rte->rtekind == RTE_SUBQUERY &&
is_simple_union_all(rte->subquery))
* Attempt to pull up a single simple subquery.
*
* jtnode is a RangeTblRef that has been tentatively identified as a simple
- * subquery by pull_up_subqueries. We return the replacement jointree node,
+ * subquery by pull_up_subqueries. We return the replacement jointree node,
* or jtnode itself if we determine that the subquery can't be pulled up after
* all.
*/
ListCell *rt;
/*
- * Need a modifiable copy of the subquery to hack on. Even if we
- * didn't sometimes choose not to pull up below, we must do this
- * to avoid problems if the same subquery is referenced from
- * multiple jointree items (which can't happen normally, but might
- * after rule rewriting).
+ * Need a modifiable copy of the subquery to hack on. Even if we didn't
+ * sometimes choose not to pull up below, we must do this to avoid
+ * problems if the same subquery is referenced from multiple jointree
+ * items (which can't happen normally, but might after rule rewriting).
*/
subquery = copyObject(rte->subquery);
* Create a PlannerInfo data structure for this subquery.
*
* NOTE: the next few steps should match the first processing in
- * subquery_planner(). Can we refactor to avoid code duplication,
- * or would that just make things uglier?
+ * subquery_planner(). Can we refactor to avoid code duplication, or
+ * would that just make things uglier?
*/
subroot = makeNode(PlannerInfo);
subroot->parse = subquery;
subroot->append_rel_list = NIL;
/*
- * Pull up any IN clauses within the subquery's WHERE, so that we
- * don't leave unoptimized INs behind.
+ * Pull up any IN clauses within the subquery's WHERE, so that we don't
+ * leave unoptimized INs behind.
*/
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subroot,
- subquery->jointree->quals);
+ subquery->jointree->quals);
/*
* Recursively pull up the subquery's subqueries, so that
*
* Note: below_outer_join = false is correct here even if we are within an
* outer join in the upper query; the lower query starts with a clean
- * slate for outer-join semantics. Likewise, we say we aren't handling
- * an appendrel member.
+ * slate for outer-join semantics. Likewise, we say we aren't handling an
+ * appendrel member.
*/
subquery->jointree = (FromExpr *)
pull_up_subqueries(subroot, (Node *) subquery->jointree, false, false);
/*
- * Now we must recheck whether the subquery is still simple enough
- * to pull up. If not, abandon processing it.
+ * Now we must recheck whether the subquery is still simple enough to pull
+ * up. If not, abandon processing it.
*
- * We don't really need to recheck all the conditions involved,
- * but it's easier just to keep this "if" looking the same as the
- * one in pull_up_subqueries.
+ * We don't really need to recheck all the conditions involved, but it's
+ * easier just to keep this "if" looking the same as the one in
+ * pull_up_subqueries.
*/
if (is_simple_subquery(subquery) &&
(!below_outer_join || has_nullable_targetlist(subquery)) &&
/*
* Give up, return unmodified RangeTblRef.
*
- * Note: The work we just did will be redone when the subquery
- * gets planned on its own. Perhaps we could avoid that by
- * storing the modified subquery back into the rangetable, but
- * I'm not gonna risk it now.
+ * Note: The work we just did will be redone when the subquery gets
+ * planned on its own. Perhaps we could avoid that by storing the
+ * modified subquery back into the rangetable, but I'm not gonna risk
+ * it now.
*/
return jtnode;
}
/*
- * Adjust level-0 varnos in subquery so that we can append its
- * rangetable to upper query's. We have to fix the subquery's
- * in_info_list and append_rel_list, as well.
+ * Adjust level-0 varnos in subquery so that we can append its rangetable
+ * to upper query's. We have to fix the subquery's in_info_list and
+ * append_rel_list, as well.
*/
rtoffset = list_length(parse->rtable);
OffsetVarNodes((Node *) subquery, rtoffset, 0);
OffsetVarNodes((Node *) subroot->append_rel_list, rtoffset, 0);
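Editor's note: a tiny illustration of what the OffsetVarNodes calls accomplish. Every level-0 varno inside the pulled-up subquery is shifted by the length of the parent's rangetable, so each Var still names the same RTE after the two rangetables are concatenated. The numbers below are invented.

    #include <stdio.h>

    int
    main(void)
    {
        int parent_rtable_len = 3;      /* parent query already has RTEs 1..3 */
        int sub_varnos[] = {1, 2, 1};   /* level-0 Vars inside the subquery */
        int n = sizeof(sub_varnos) / sizeof(sub_varnos[0]);
        int i;

        for (i = 0; i < n; i++)
        {
            /* the effect of OffsetVarNodes(..., rtoffset, 0) on each Var */
            sub_varnos[i] += parent_rtable_len;
            printf("subquery Var %d now has varno %d\n", i, sub_varnos[i]);
        }
        return 0;
    }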
/*
- * Upper-level vars in subquery are now one level closer to their
- * parent than before.
+ * Upper-level vars in subquery are now one level closer to their parent
+ * than before.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->in_info_list, -1, 1);
IncrementVarSublevelsUp((Node *) subroot->append_rel_list, -1, 1);
/*
- * Replace all of the top query's references to the subquery's
- * outputs with copies of the adjusted subtlist items, being
- * careful not to replace any of the jointree structure. (This'd
- * be a lot cleaner if we could use query_tree_mutator.)
+ * Replace all of the top query's references to the subquery's outputs
+ * with copies of the adjusted subtlist items, being careful not to
+ * replace any of the jointree structure. (This'd be a lot cleaner if we
+ * could use query_tree_mutator.)
*/
subtlist = subquery->targetList;
parse->targetList = (List *)
}
/*
- * Now append the adjusted rtable entries to upper query. (We hold
- * off until after fixing the upper rtable entries; no point in
- * running that code on the subquery ones too.)
+ * Now append the adjusted rtable entries to upper query. (We hold off
+ * until after fixing the upper rtable entries; no point in running that
+ * code on the subquery ones too.)
*/
parse->rtable = list_concat(parse->rtable, subquery->rtable);
/*
- * Pull up any FOR UPDATE/SHARE markers, too. (OffsetVarNodes
- * already adjusted the marker rtindexes, so just concat the lists.)
+ * Pull up any FOR UPDATE/SHARE markers, too. (OffsetVarNodes already
+ * adjusted the marker rtindexes, so just concat the lists.)
*/
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
/*
- * We also have to fix the relid sets of any parent InClauseInfo
- * nodes. (This could perhaps be done by ResolveNew, but it would
- * clutter that routine's API unreasonably.)
+ * We also have to fix the relid sets of any parent InClauseInfo nodes.
+ * (This could perhaps be done by ResolveNew, but it would clutter that
+ * routine's API unreasonably.)
*
- * Likewise, relids appearing in AppendRelInfo nodes have to be fixed
- * (but we took care of their translated_vars lists above). We already
- * checked that this won't require introducing multiple subrelids into
- * the single-slot AppendRelInfo structs.
+ * Likewise, relids appearing in AppendRelInfo nodes have to be fixed (but
+ * we took care of their translated_vars lists above). We already checked
+ * that this won't require introducing multiple subrelids into the
+ * single-slot AppendRelInfo structs.
*/
if (root->in_info_list || root->append_rel_list)
{
subroot->append_rel_list);
/*
- * We don't have to do the equivalent bookkeeping for outer-join
- * info, because that hasn't been set up yet.
+ * We don't have to do the equivalent bookkeeping for outer-join info,
+ * because that hasn't been set up yet.
*/
Assert(root->oj_info_list == NIL);
Assert(subroot->oj_info_list == NIL);
/* subquery won't be pulled up if it hasAggs, so no work there */
/*
- * Return the adjusted subquery jointree to replace the
- * RangeTblRef entry in parent's jointree.
+ * Return the adjusted subquery jointree to replace the RangeTblRef entry
+ * in parent's jointree.
*/
return (Node *) subquery->jointree;
}
* Pull up a single simple UNION ALL subquery.
*
* jtnode is a RangeTblRef that has been identified as a simple UNION ALL
- * subquery by pull_up_subqueries. We pull up the leaf subqueries and
+ * subquery by pull_up_subqueries. We pull up the leaf subqueries and
* build an "append relation" for the union set. The result value is just
* jtnode, since we don't actually need to change the query jointree.
*/
/*
* Upper-level vars in subquery are now one level closer to their
- * parent than before. We don't have to worry about offsetting
- * varnos, though, because any such vars must refer to stuff above
- * the level of the query we are pulling into.
+ * parent than before. We don't have to worry about offsetting
+ * varnos, though, because any such vars must refer to stuff above the
+ * level of the query we are pulling into.
*/
IncrementVarSublevelsUp((Node *) subquery, -1, 1);
/*
* Don't pull up a subquery that has any volatile functions in its
- * targetlist. Otherwise we might introduce multiple evaluations of
- * these functions, if they get copied to multiple places in the upper
- * query, leading to surprising results.
+ * targetlist. Otherwise we might introduce multiple evaluations of these
+ * functions, if they get copied to multiple places in the upper query,
+ * leading to surprising results.
*/
if (contain_volatile_functions((Node *) subquery->targetList))
return false;
ListCell *l;
/*
- * It's only safe to pull up the child if its jointree contains
- * exactly one RTE, else the AppendRelInfo data structure breaks.
- * The one base RTE could be buried in several levels of FromExpr,
- * however.
+ * It's only safe to pull up the child if its jointree contains exactly
+ * one RTE, else the AppendRelInfo data structure breaks. The one base RTE
+ * could be buried in several levels of FromExpr, however.
*
- * Also, the child can't have any WHERE quals because there's no
- * place to put them in an appendrel. (This is a bit annoying...)
- * If we didn't need to check this, we'd just test whether
- * get_relids_in_jointree() yields a singleton set, to be more
- * consistent with the coding of fix_append_rel_relids().
+ * Also, the child can't have any WHERE quals because there's no place to
+ * put them in an appendrel. (This is a bit annoying...) If we didn't
+ * need to check this, we'd just test whether get_relids_in_jointree()
+ * yields a singleton set, to be more consistent with the coding of
+ * fix_append_rel_relids().
*/
jtnode = subquery->jointree;
while (IsA(jtnode, FromExpr))
/*
* XXX For the moment we also have to insist that the subquery's tlist
* includes only simple Vars. This is pretty annoying, but fixing it
- * seems to require nontrivial changes --- mainly because joinrel
- * tlists are presently assumed to contain only Vars. Perhaps a
- * pseudo-variable mechanism similar to the one speculated about
- * in pull_up_subqueries' comments would help? FIXME someday.
+ * seems to require nontrivial changes --- mainly because joinrel tlists
+ * are presently assumed to contain only Vars. Perhaps a pseudo-variable
+ * mechanism similar to the one speculated about in pull_up_subqueries'
+ * comments would help? FIXME someday.
*/
foreach(l, subquery->targetList)
{
/*
* We only want to extract the member relid once, but we mustn't fail
- * immediately if there are multiple members; it could be that none of
- * the AppendRelInfo nodes refer to it. So compute it on first use.
- * Note that bms_singleton_member will complain if set is not singleton.
+ * immediately if there are multiple members; it could be that none of the
+ * AppendRelInfo nodes refer to it. So compute it on first use. Note that
+ * bms_singleton_member will complain if set is not singleton.
*/
foreach(l, append_rel_list)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.55 2006/07/14 14:52:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.56 2006/10/04 00:29:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (negator)
{
- OpExpr *newopexpr = makeNode(OpExpr);
+ OpExpr *newopexpr = makeNode(OpExpr);
newopexpr->opno = negator;
newopexpr->opfuncid = InvalidOid;
{
/*
* Negate a ScalarArrayOpExpr if there is a negator for its operator;
- * for example x = ANY (list) becomes x <> ALL (list).
- * Otherwise, retain the clause as it is (the NOT can't be pushed down
- * any farther).
+ * for example x = ANY (list) becomes x <> ALL (list). Otherwise,
+ * retain the clause as it is (the NOT can't be pushed down any
+ * farther).
*/
ScalarArrayOpExpr *saopexpr = (ScalarArrayOpExpr *) qual;
Oid negator = get_negator(saopexpr->opno);
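Editor's note: a standalone sketch of the negation rule described above, swapping the operator for its negator and inverting the ANY/ALL flag so that NOT (x = ANY (list)) becomes x <> ALL (list). Strings stand in for operator OIDs, and the negator is supplied by the caller rather than looked up with get_negator().

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-in for ScalarArrayOpExpr: operator name instead of an OID */
    typedef struct ExampleSAOp
    {
        const char *opname;
        bool        useOr;          /* true = ANY, false = ALL */
    } ExampleSAOp;

    static bool
    negate_saop(ExampleSAOp *clause, const char *negator)
    {
        if (negator == NULL)
            return false;           /* no negator: keep the NOT where it is */
        clause->opname = negator;
        clause->useOr = !clause->useOr;
        return true;
    }

    int
    main(void)
    {
        ExampleSAOp clause = {"=", true};   /* x = ANY (list) */

        if (negate_saop(&clause, "<>"))     /* "<>" plays the role of the negator */
            printf("NOT (x = ANY (list)) becomes x %s %s (list)\n",
                   clause.opname, clause.useOr ? "ANY" : "ALL");
        return 0;
    }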
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.83 2006/08/12 02:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/preptlist.c,v 1.84 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If the query has a RETURNING list, add resjunk entries for any Vars
* used in RETURNING that belong to other relations. We need to do this
- * to make these Vars available for the RETURNING calculation. Vars
- * that belong to the result rel don't need to be added, because they
- * will be made to refer to the actual heap tuple.
+ * to make these Vars available for the RETURNING calculation. Vars that
+ * belong to the result rel don't need to be added, because they will be
+ * made to refer to the actual heap tuple.
*/
if (parse->returningList && list_length(parse->rtable) > 1)
{
- List *vars;
+ List *vars;
ListCell *l;
vars = pull_var_clause((Node *) parse->returningList, false);
*
* There are two code paths in the planner for set-operation queries.
* If a subquery consists entirely of simple UNION ALL operations, it
- * is converted into an "append relation". Otherwise, it is handled
+ * is converted into an "append relation". Otherwise, it is handled
* by the general code in this module (plan_set_operations and its
* subroutines). There is some support code here for the append-relation
* case, but most of the heavy lifting for that is done elsewhere,
* notably in prepjointree.c and allpaths.c.
*
* There is also some code here to support planning of queries that use
- * inheritance (SELECT FROM foo*). Inheritance trees are converted into
+ * inheritance (SELECT FROM foo*). Inheritance trees are converted into
* append relations, and thenceforth share code with the UNION ALL case.
*
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.133 2006/08/10 02:36:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.134 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *input_plans,
List *refnames_tlist);
static void expand_inherited_rtentry(PlannerInfo *root, RangeTblEntry *rte,
- Index rti);
+ Index rti);
static void make_inh_translation_lists(Relation oldrelation,
- Relation newrelation,
- Index newvarno,
- List **col_mappings,
- List **translated_vars);
+ Relation newrelation,
+ Index newvarno,
+ List **col_mappings,
+ List **translated_vars);
static Node *adjust_appendrel_attrs_mutator(Node *node,
- AppendRelInfo *context);
+ AppendRelInfo *context);
static Relids adjust_relid_set(Relids relids, Index oldrelid, Index newrelid);
static List *adjust_inherited_tlist(List *tlist,
AppendRelInfo *context);
/*
* expand_inherited_tables
* Expand each rangetable entry that represents an inheritance set
- * into an "append relation". At the conclusion of this process,
+ * into an "append relation". At the conclusion of this process,
* the "inh" flag is set in all and only those RTEs that are append
* relation parents.
*/
void
expand_inherited_tables(PlannerInfo *root)
{
- Index nrtes;
- Index rti;
- ListCell *rl;
+ Index nrtes;
+ Index rti;
+ ListCell *rl;
/*
- * expand_inherited_rtentry may add RTEs to parse->rtable; there is
- * no need to scan them since they can't have inh=true. So just
- * scan as far as the original end of the rtable list.
+ * expand_inherited_rtentry may add RTEs to parse->rtable; there is no
+ * need to scan them since they can't have inh=true. So just scan as far
+ * as the original end of the rtable list.
*/
nrtes = list_length(root->parse->rtable);
rl = list_head(root->parse->rtable);
* Check whether a rangetable entry represents an inheritance set.
* If so, add entries for all the child tables to the query's
* rangetable, and build AppendRelInfo nodes for all the child tables
- * and add them to root->append_rel_list. If not, clear the entry's
+ * and add them to root->append_rel_list. If not, clear the entry's
* "inh" flag to prevent later code from looking for AppendRelInfos.
*
* Note that the original RTE is considered to represent the whole
}
/*
- * Must open the parent relation to examine its tupdesc. We need not
- * lock it since the rewriter already obtained at least AccessShareLock
- * on each relation used in the query.
+ * Must open the parent relation to examine its tupdesc. We need not lock
+ * it since the rewriter already obtained at least AccessShareLock on each
+ * relation used in the query.
*/
oldrelation = heap_open(parentOID, NoLock);
/*
- * However, for each child relation we add to the query, we must obtain
- * an appropriate lock, because this will be the first use of those
- * relations in the parse/rewrite/plan pipeline.
+ * However, for each child relation we add to the query, we must obtain an
+ * appropriate lock, because this will be the first use of those relations
+ * in the parse/rewrite/plan pipeline.
*
* If the parent relation is the query's result relation, then we need
* RowExclusiveLock. Otherwise, check to see if the relation is accessed
* FOR UPDATE/SHARE or not. We can't just grab AccessShareLock because
* then the executor would be trying to upgrade the lock, leading to
- * possible deadlocks. (This code should match the parser and rewriter.)
+ * possible deadlocks. (This code should match the parser and rewriter.)
*/
if (rti == parse->resultRelation)
lockmode = RowExclusiveLock;
/*
* The executor will check the parent table's access permissions when it
- * examines the parent's added RTE entry. There's no need to check
- * twice, so turn off access check bits in the original RTE.
+ * examines the parent's added RTE entry. There's no need to check twice,
+ * so turn off access check bits in the original RTE.
*/
rte->requiredPerms = 0;
}
atttypmod = att->atttypmod;
/*
- * When we are generating the "translation list" for the parent
- * table of an inheritance set, no need to search for matches.
+ * When we are generating the "translation list" for the parent table
+ * of an inheritance set, no need to search for matches.
*/
if (oldrelation == newrelation)
{
/*
* Otherwise we have to search for the matching column by name.
- * There's no guarantee it'll have the same column position,
- * because of cases like ALTER TABLE ADD COLUMN and multiple
- * inheritance.
+ * There's no guarantee it'll have the same column position, because
+ * of cases like ALTER TABLE ADD COLUMN and multiple inheritance.
*/
for (new_attno = 0; new_attno < newnatts; new_attno++)
{
if (atttypid != att->atttypid || atttypmod != att->atttypmod)
elog(ERROR, "attribute \"%s\" of relation \"%s\" does not match parent's type",
attname, RelationGetRelationName(newrelation));
-
+
numbers = lappend_int(numbers, new_attno + 1);
vars = lappend(vars, makeVar(newvarno,
(AttrNumber) (new_attno + 1),
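Editor's note: a minimal sketch of the by-name column match described above. Child attribute positions need not line up with the parent's (ALTER TABLE ADD COLUMN, multiple inheritance), so the match is done on the column name; the names and positions below are invented, and attribute numbers are 1-based as in the backend.

    #include <stdio.h>
    #include <string.h>

    /* Return the 1-based child attribute number whose name matches, or 0 */
    static int
    find_column(const char **attnames, int natts, const char *target)
    {
        int i;

        for (i = 0; i < natts; i++)
        {
            if (strcmp(attnames[i], target) == 0)
                return i + 1;
        }
        return 0;
    }

    int
    main(void)
    {
        /* the child added a column, so positions differ from the parent */
        const char *child_cols[] = {"id", "added_later", "name"};

        printf("parent column \"name\" maps to child attno %d\n",
               find_column(child_cols, 3, "name"));
        return 0;
    }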
var->varnoold = context->child_relid;
if (var->varattno > 0)
{
- Node *newnode;
+ Node *newnode;
if (var->varattno > list_length(context->translated_vars))
elog(ERROR, "attribute %d of relation \"%s\" does not exist",
else if (var->varattno == 0)
{
/*
- * Whole-row Var: if we are dealing with named rowtypes,
- * we can use a whole-row Var for the child table plus a
- * coercion step to convert the tuple layout to the parent's
- * rowtype. Otherwise we have to generate a RowExpr.
+ * Whole-row Var: if we are dealing with named rowtypes, we
+ * can use a whole-row Var for the child table plus a coercion
+ * step to convert the tuple layout to the parent's rowtype.
+ * Otherwise we have to generate a RowExpr.
*/
if (OidIsValid(context->child_reltype))
{
* BUT: although we don't need to recurse into subplans, we do need to
* make sure that they are copied, not just referenced as
* expression_tree_mutator will do by default. Otherwise we'll have the
- * same subplan node referenced from each arm of the finished APPEND
- * plan, which will cause trouble in the executor. This is a kluge that
- * should go away when we redesign querytrees.
+ * same subplan node referenced from each arm of the finished APPEND plan,
+ * which will cause trouble in the executor. This is a kluge that should
+ * go away when we redesign querytrees.
*/
if (is_subplan(node))
{
*
* The relid sets are adjusted by substituting child_relid for parent_relid.
* (NOTE: oldrel is not necessarily the parent_relid relation!) We are also
- * careful to map attribute numbers within the array properly. User
+ * careful to map attribute numbers within the array properly. User
* attributes have to be mapped through col_mappings, but system attributes
* and whole-row references always have the same attno.
*
foreach(tl, tlist)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
- int newattno;
+ int newattno;
if (tle->resjunk)
continue; /* ignore junk items */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.221 2006/09/28 20:51:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.222 2006/10/04 00:29:55 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
Form_pg_aggregate aggform;
Oid aggtranstype;
int i;
- ListCell *l;
+ ListCell *l;
Assert(aggref->agglevelsup == 0);
counts->numAggs++;
*
* Returns the set of all Relids that are referenced in the clause in such
* a way that the clause cannot possibly return TRUE if any of these Relids
- * is an all-NULL row. (It is OK to err on the side of conservatism; hence
+ * is an all-NULL row. (It is OK to err on the side of conservatism; hence
* the analysis here is simplistic.)
*
* The semantics here are subtly different from contain_nonstrict_functions:
static bool
is_strict_saop(ScalarArrayOpExpr *expr, bool falseOK)
{
- Node *rightop;
+ Node *rightop;
/* The contained operator must be strict. */
if (!op_strict(expr->opno))
}
clause->opnos = newops;
+
/*
- * Note: we don't bother to update the opclasses list, but just set
- * it to empty. This is OK since this routine is currently only used
- * for index quals, and the index machinery won't use the opclass
- * information. The original opclass list is NOT valid if we have
- * commuted any cross-type comparisons, so don't leave it in place.
+ * Note: we don't bother to update the opclasses list, but just set it to
+ * empty. This is OK since this routine is currently only used for index
+ * quals, and the index machinery won't use the opclass information. The
+ * original opclass list is NOT valid if we have commuted any cross-type
+ * comparisons, so don't leave it in place.
*/
clause->opclasses = NIL; /* XXX */
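A small standalone sketch of what commuting such a clause means (toy types, not the real RowCompareExpr): each pairwise operator is replaced by its commutator and the two argument sides swap places, which is also why the original opclass list can no longer be trusted after a cross-type comparison has been commuted.

#include <stdio.h>

/* Toy pairwise comparison: "left op right" for one column of the row */
typedef struct
{
	char		op;				/* '<', '>', or '=' */
	int			left;
	int			right;
} ToyCompare;

/* Commutator of a toy operator: a < b  <=>  b > a, and '=' commutes to itself. */
static char
commutator(char op)
{
	if (op == '<')
		return '>';
	if (op == '>')
		return '<';
	return '=';
}

/* Commute the comparison in place: swap the arguments and the operator. */
static void
commute(ToyCompare *c)
{
	int			tmp = c->left;

	c->left = c->right;
	c->right = tmp;
	c->op = commutator(c->op);
}

int
main(void)
{
	ToyCompare	c = {'<', 1, 2};	/* 1 < 2 */

	commute(&c);
	printf("%d %c %d\n", c.left, c.op, c.right);	/* 2 > 1 */
	return 0;
}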
context);
if (arg && IsA(arg, RowExpr))
{
- RowExpr *rarg = (RowExpr *) arg;
- List *newargs = NIL;
- ListCell *l;
+ RowExpr *rarg = (RowExpr *) arg;
+ List *newargs = NIL;
+ ListCell *l;
/*
* We break ROW(...) IS [NOT] NULL into separate tests on its
*/
foreach(l, rarg->args)
{
- Node *relem = (Node *) lfirst(l);
+ Node *relem = (Node *) lfirst(l);
/*
- * A constant field refutes the whole NullTest if it's of
- * the wrong nullness; else we can discard it.
+ * A constant field refutes the whole NullTest if it's of the
+ * wrong nullness; else we can discard it.
*/
if (relem && IsA(relem, Const))
{
- Const *carg = (Const *) relem;
+ Const *carg = (Const *) relem;
if (carg->constisnull ?
(ntest->nulltesttype == IS_NOT_NULL) :
}
if (arg && IsA(arg, Const))
{
- Const *carg = (Const *) arg;
- bool result;
+ Const *carg = (Const *) arg;
+ bool result;
switch (ntest->nulltesttype)
{
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) ntest->nulltesttype);
- result = false; /* keep compiler quiet */
+ result = false; /* keep compiler quiet */
break;
}
context);
if (arg && IsA(arg, Const))
{
- Const *carg = (Const *) arg;
- bool result;
+ Const *carg = (Const *) arg;
+ bool result;
switch (btest->booltesttype)
{
default:
elog(ERROR, "unrecognized booltesttype: %d",
(int) btest->booltesttype);
- result = false; /* keep compiler quiet */
+ result = false; /* keep compiler quiet */
break;
}
break;
case T_Aggref:
{
- Aggref *expr = (Aggref *) node;
+ Aggref *expr = (Aggref *) node;
if (expression_tree_walker((Node *) expr->args,
walker, context))
if (query->utilityStmt)
{
/*
- * Certain utility commands contain general-purpose Querys embedded
- * in them --- if this is one, invoke the walker on the sub-Query.
+ * Certain utility commands contain general-purpose Querys embedded in
+ * them --- if this is one, invoke the walker on the sub-Query.
*/
if (IsA(query->utilityStmt, CopyStmt))
{
break;
case T_RowCompareExpr:
{
- RowCompareExpr *rcexpr = (RowCompareExpr *) node;
- RowCompareExpr *newnode;
+ RowCompareExpr *rcexpr = (RowCompareExpr *) node;
+ RowCompareExpr *newnode;
FLATCOPY(newnode, rcexpr, RowCompareExpr);
MUTATE(newnode->largs, rcexpr->largs, List *);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.132 2006/08/02 01:59:46 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.133 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* for an ordered index, or NoMovementScanDirection for
* an unordered index.
* 'outer_rel' is the outer relation if this is a join inner indexscan path.
- * (pathkeys and indexscandir are ignored if so.) NULL if not.
+ * (pathkeys and indexscandir are ignored if so.) NULL if not.
*
* Returns the new path node.
*/
/* Ideally should define cost_result(), but I'm too lazy */
pathnode->path.startup_cost = 0;
pathnode->path.total_cost = cpu_tuple_cost;
+
/*
- * In theory we should include the qual eval cost as well, but
- * at present that doesn't accomplish much except duplicate work that
- * will be done again in make_result; since this is only used for
- * degenerate cases, nothing interesting will be done with the path
- * cost values...
+ * In theory we should include the qual eval cost as well, but at present
+ * that doesn't accomplish much except duplicate work that will be done
+ * again in make_result; since this is only used for degenerate cases,
+ * nothing interesting will be done with the path cost values...
*/
return pathnode;
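As a rough illustration of the costing shortcut described above (toy struct, not the planner's Path node): a degenerate Result path produces a single row, so it is charged one cpu_tuple_cost and, as the comment says, nothing for qual evaluation, on the assumption that the work would only be repeated later anyway.

#include <stdio.h>

typedef struct
{
	double		startup_cost;
	double		total_cost;
} ToyPathCost;

/* Charge one tuple's worth of CPU; deliberately skip qual-eval cost. */
static ToyPathCost
cost_degenerate_result(double cpu_tuple_cost)
{
	ToyPathCost c;

	c.startup_cost = 0.0;
	c.total_cost = cpu_tuple_cost;
	return c;
}

int
main(void)
{
	ToyPathCost c = cost_degenerate_result(0.01);

	printf("%.2f..%.2f\n", c.startup_cost, c.total_cost);
	return 0;
}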
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.126 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.127 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *indexinfos = NIL;
/*
- * We need not lock the relation since it was already locked, either
- * by the rewriter or when expand_inherited_rtentry() added it to the
- * query's rangetable.
+ * We need not lock the relation since it was already locked, either by
+ * the rewriter or when expand_inherited_rtentry() added it to the query's
+ * rangetable.
*/
relation = heap_open(relationObjectId, NoLock);
/*
* Estimate relation size --- unless it's an inheritance parent, in which
- * case the size will be computed later in set_append_rel_pathlist, and
- * we must leave it zero for now to avoid bollixing the total_table_pages
+ * case the size will be computed later in set_append_rel_pathlist, and we
+ * must leave it zero for now to avoid bollixing the total_table_pages
* calculation.
*/
if (!inhparent)
/*
* Ignore invalid indexes, since they can't safely be used for
- * queries. Note that this is OK because the data structure
- * we are constructing is only used by the planner --- the
- * executor still needs to insert into "invalid" indexes!
+ * queries. Note that this is OK because the data structure we
+ * are constructing is only used by the planner --- the executor
+ * still needs to insert into "invalid" indexes!
*/
if (!index->indisvalid)
{
/*
* We do not currently enforce that CHECK constraints contain only
* immutable functions, so it's necessary to check here. We daren't draw
- * conclusions from plan-time evaluation of non-immutable functions.
- * Since they're ANDed, we can just ignore any mutable constraints in
- * the list, and reason about the rest.
+ * conclusions from plan-time evaluation of non-immutable functions. Since
+ * they're ANDed, we can just ignore any mutable constraints in the list,
+ * and reason about the rest.
*/
safe_constraints = NIL;
foreach(lc, constraint_pred)
{
- Node *pred = (Node *) lfirst(lc);
+ Node *pred = (Node *) lfirst(lc);
if (!contain_mutable_functions(pred))
safe_constraints = lappend(safe_constraints, pred);
* refute the entire collection at once. This may allow us to make proofs
* that would fail if we took them individually.
*
- * Note: we use rel->baserestrictinfo, not safe_restrictions as might
- * seem an obvious optimization. Some of the clauses might be OR clauses
- * that have volatile and nonvolatile subclauses, and it's OK to make
+ * Note: we use rel->baserestrictinfo, not safe_restrictions as might seem
+ * an obvious optimization. Some of the clauses might be OR clauses that
+ * have volatile and nonvolatile subclauses, and it's OK to make
* deductions with the nonvolatile parts.
*/
if (predicate_refuted_by(safe_constraints, rel->baserestrictinfo))
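A self-contained toy version of the exclusion test sketched above, assuming a single integer column with simple range predicates (the types and names are hypothetical): mutable constraints are skipped, the remaining CHECK constraints are ANDed into one interval, and the relation can be excluded only if that interval cannot overlap the interval implied by the query's restrictions.

#include <stdbool.h>
#include <stdio.h>

/* "col BETWEEN lo AND hi"; immutable says whether we may reason about it */
typedef struct
{
	int			lo;
	int			hi;
	bool		immutable;
} RangePred;

/*
 * Return true if the ANDed immutable constraints refute the query restriction,
 * i.e. no value of col can satisfy both, so the relation can be excluded.
 */
static bool
excluded_by_constraints(const RangePred *constraints, int nconstraints,
						RangePred restriction)
{
	int			lo = restriction.lo;
	int			hi = restriction.hi;
	int			i;

	for (i = 0; i < nconstraints; i++)
	{
		if (!constraints[i].immutable)
			continue;			/* daren't reason about mutable checks */
		if (constraints[i].lo > lo)
			lo = constraints[i].lo;
		if (constraints[i].hi < hi)
			hi = constraints[i].hi;
	}
	return lo > hi;				/* empty intersection => refuted */
}

int
main(void)
{
	RangePred	checks[] = {{0, 99, true}, {0, 1000000, false}};
	RangePred	where_clause = {200, 300, true};

	printf("%s\n", excluded_by_constraints(checks, 2, where_clause)
		   ? "excluded" : "must scan");
	return 0;
}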
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.9 2006/09/28 20:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/predtest.c,v 1.10 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
switch (pclass)
{
case CLASS_AND:
+
/*
* AND-clause => AND-clause if A implies each of B's items
*/
return result;
case CLASS_OR:
+
/*
* AND-clause => OR-clause if A implies any of B's items
*
iterate_end(pred_info);
if (result)
return result;
+
/*
* Also check if any of A's items implies B
*
return result;
case CLASS_ATOM:
+
/*
* AND-clause => atom if any of A's items implies B
*/
switch (pclass)
{
case CLASS_OR:
+
/*
* OR-clause => OR-clause if each of A's items implies any
* of B's items. Messy but can't do it any more simply.
result = true;
iterate_begin(citem, clause, clause_info)
{
- bool presult = false;
+ bool presult = false;
iterate_begin(pitem, predicate, pred_info)
{
case CLASS_AND:
case CLASS_ATOM:
+
/*
* OR-clause => AND-clause if each of A's items implies B
*
switch (pclass)
{
case CLASS_AND:
+
/*
* atom => AND-clause if A implies each of B's items
*/
return result;
case CLASS_OR:
+
/*
* atom => OR-clause if A implies any of B's items
*/
return result;
case CLASS_ATOM:
+
/*
* atom => atom is the base case
*/
switch (pclass)
{
case CLASS_AND:
+
/*
* AND-clause R=> AND-clause if A refutes any of B's items
*
iterate_end(pred_info);
if (result)
return result;
+
/*
* Also check if any of A's items refutes B
*
return result;
case CLASS_OR:
+
/*
* AND-clause R=> OR-clause if A refutes each of B's items
*/
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* AND-clause R=> atom if any of A's items refutes B
*/
switch (pclass)
{
case CLASS_OR:
+
/*
* OR-clause R=> OR-clause if A refutes each of B's items
*/
return result;
case CLASS_AND:
+
/*
* OR-clause R=> AND-clause if each of A's items refutes
* any of B's items.
result = true;
iterate_begin(citem, clause, clause_info)
{
- bool presult = false;
+ bool presult = false;
iterate_begin(pitem, predicate, pred_info)
{
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* OR-clause R=> atom if each of A's items refutes B
*/
break;
case CLASS_ATOM:
+
/*
* If A is a NOT-clause, A R=> B if B => A's arg
*/
switch (pclass)
{
case CLASS_AND:
+
/*
* atom R=> AND-clause if A refutes any of B's items
*/
return result;
case CLASS_OR:
+
/*
* atom R=> OR-clause if A refutes each of B's items
*/
return result;
case CLASS_ATOM:
+
/*
* If B is a NOT-clause, A R=> B if A => B's arg
*/
if (not_arg &&
predicate_implied_by_recurse(clause, not_arg))
return true;
+
/*
* atom R=> atom is the base case
*/
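As a compact illustration of the recursion these comments describe, here is a self-contained toy implication checker over AND/OR trees of named atoms. Everything below is illustrative only: the real code also handles refutation (the R=> rules above), NOT-clauses, and operator-level proofs at the atoms, whereas in this toy two atoms imply each other only when they are identical.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef enum
{
	EXPR_ATOM,
	EXPR_AND,
	EXPR_OR
} ExprKind;

typedef struct Expr
{
	ExprKind	kind;
	const char *name;			/* atom label, e.g. "x>0" */
	const struct Expr **items;	/* children for AND/OR nodes */
	int			nitems;
} Expr;

/* Base case: in this toy, one atom implies another only if they are equal. */
static bool
atom_implies(const Expr *a, const Expr *b)
{
	return strcmp(a->name, b->name) == 0;
}

/*
 * Sound (not complete) recursive implication test, applying the AND/OR
 * decomposition rules listed in the comments above in a fixed order.
 */
static bool
implies(const Expr *a, const Expr *b)
{
	int			i;

	if (a->kind == EXPR_OR)
	{
		/* OR-clause => B only if each of A's items implies B */
		for (i = 0; i < a->nitems; i++)
			if (!implies(a->items[i], b))
				return false;
		return true;
	}
	if (b->kind == EXPR_AND)
	{
		/* A => AND-clause only if A implies each of B's items */
		for (i = 0; i < b->nitems; i++)
			if (!implies(a, b->items[i]))
				return false;
		return true;
	}
	if (b->kind == EXPR_OR)
	{
		/* A => OR-clause if A implies any of B's items */
		for (i = 0; i < b->nitems; i++)
			if (implies(a, b->items[i]))
				return true;
		/* fall through: an AND-clause A may still prove B below */
	}
	if (a->kind == EXPR_AND)
	{
		/* AND-clause => B if any of A's items implies B */
		for (i = 0; i < a->nitems; i++)
			if (implies(a->items[i], b))
				return true;
		return false;
	}
	return b->kind == EXPR_ATOM && atom_implies(a, b);
}

int
main(void)
{
	Expr		x = {EXPR_ATOM, "x>0", NULL, 0};
	Expr		y = {EXPR_ATOM, "y>0", NULL, 0};
	const Expr *both[] = {&x, &y};
	Expr		and_xy = {EXPR_AND, NULL, both, 2};
	Expr		or_xy = {EXPR_OR, NULL, both, 2};

	/* (x AND y) => (x OR y) holds; the reverse does not: prints "1 0" */
	printf("%d %d\n", implies(&and_xy, &or_xy), implies(&or_xy, &and_xy));
	return 0;
}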
Assert(!IsA(clause, RestrictInfo));
/*
- * If we see a List, assume it's an implicit-AND list; this is the
- * correct semantics for lists of RestrictInfo nodes.
+ * If we see a List, assume it's an implicit-AND list; this is the correct
+ * semantics for lists of RestrictInfo nodes.
*/
if (IsA(clause, List))
{
if (IsA(clause, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
- Node *arraynode = (Node *) lsecond(saop->args);
+ Node *arraynode = (Node *) lsecond(saop->args);
/*
- * We can break this down into an AND or OR structure, but only if
- * we know how to iterate through expressions for the array's
- * elements. We can do that if the array operand is a non-null
- * constant or a simple ArrayExpr.
+ * We can break this down into an AND or OR structure, but only if we
+ * know how to iterate through expressions for the array's elements.
+ * We can do that if the array operand is a non-null constant or a
+ * simple ArrayExpr.
*/
if (arraynode && IsA(arraynode, Const) &&
!((Const *) arraynode)->constisnull)
}
/*
- * PredIterInfo routines for iterating over regular Lists. The iteration
+ * PredIterInfo routines for iterating over regular Lists. The iteration
* state variable is the next ListCell to visit.
*/
static void
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) clause;
ArrayExprIterState *state;
- ArrayExpr *arrayexpr;
+ ArrayExpr *arrayexpr;
/* Create working state struct */
state = (ArrayExprIterState *) palloc(sizeof(ArrayExprIterState));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.82 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.83 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case RTE_SUBQUERY:
case RTE_FUNCTION:
case RTE_VALUES:
+
/*
- * Subquery, function, or values list --- set up attr range
- * and arrays
+ * Subquery, function, or values list --- set up attr range and
+ * arrays
*
* Note: 0 is included in range to support whole-row Vars
*/
int ndx;
/*
- * We can't run into any child RowExprs here, but we could find
- * a whole-row Var with a ConvertRowtypeExpr atop it.
+ * We can't run into any child RowExprs here, but we could find a
+ * whole-row Var with a ConvertRowtypeExpr atop it.
*/
var = origvar;
while (!IsA(var, Var))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.48 2006/07/01 18:38:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.49 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
return NIL;
}
+
/*
* If the sublist contains multiple RestrictInfos, we create an
* AND subclause. If there's just one, we have to check if it's
* an OR clause, and if so flatten it to preserve AND/OR flatness
* of our output.
*
- * We construct lists with and without sub-RestrictInfos, so
- * as not to have to regenerate duplicate RestrictInfos below.
+ * We construct lists with and without sub-RestrictInfos, so as
+ * not to have to regenerate duplicate RestrictInfos below.
*/
if (list_length(sublist) > 1)
{
}
else
{
- RestrictInfo *subri = (RestrictInfo *) linitial(sublist);
+ RestrictInfo *subri = (RestrictInfo *) linitial(sublist);
Assert(IsA(subri, RestrictInfo));
if (restriction_is_or_clause(subri))
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.351 2006/09/18 16:04:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.352 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
List **extras_before, List **extras_after);
static List *transformInsertRow(ParseState *pstate, List *exprlist,
- List *stmtcols, List *icolumns, List *attrnos);
+ List *stmtcols, List *icolumns, List *attrnos);
static List *transformReturningList(ParseState *pstate, List *returningList);
static Query *transformIndexStmt(ParseState *pstate, IndexStmt *stmt);
static Query *transformRuleStmt(ParseState *query, RuleStmt *stmt,
bool isAddConstraint);
static void applyColumnNames(List *dst, List *src);
static void getSetColTypes(ParseState *pstate, Node *node,
- List **colTypes, List **colTypmods);
+ List **colTypes, List **colTypmods);
static void transformLockingClause(Query *qry, LockingClause *lc);
static void transformConstraintAttrs(List *constraintList);
static void transformColumnType(ParseState *pstate, ColumnDef *column);
case T_CopyStmt:
{
- CopyStmt *n = (CopyStmt *) parseTree;
+ CopyStmt *n = (CopyStmt *) parseTree;
result = makeNode(Query);
result->commandType = CMD_UTILITY;
/*
* We have three cases to deal with: DEFAULT VALUES (selectStmt == NULL),
- * VALUES list, or general SELECT input. We special-case VALUES, both
- * for efficiency and so we can handle DEFAULT specifications.
+ * VALUES list, or general SELECT input. We special-case VALUES, both for
+ * efficiency and so we can handle DEFAULT specifications.
*/
isGeneralSelect = (selectStmt && selectStmt->valuesLists == NIL);
{
/*
* We have INSERT ... DEFAULT VALUES. We can handle this case by
- * emitting an empty targetlist --- all columns will be defaulted
- * when the planner expands the targetlist.
+ * emitting an empty targetlist --- all columns will be defaulted when
+ * the planner expands the targetlist.
*/
exprList = NIL;
}
else if (list_length(selectStmt->valuesLists) > 1)
{
/*
- * Process INSERT ... VALUES with multiple VALUES sublists.
- * We generate a VALUES RTE holding the transformed expression
- * lists, and build up a targetlist containing Vars that reference
- * the VALUES RTE.
+ * Process INSERT ... VALUES with multiple VALUES sublists. We
+ * generate a VALUES RTE holding the transformed expression lists, and
+ * build up a targetlist containing Vars that reference the VALUES
+ * RTE.
*/
List *exprsLists = NIL;
int sublist_length = -1;
foreach(lc, selectStmt->valuesLists)
{
- List *sublist = (List *) lfirst(lc);
+ List *sublist = (List *) lfirst(lc);
/* Do basic expression transformation (same as a ROW() expr) */
sublist = transformExpressionList(pstate, sublist);
/*
- * All the sublists must be the same length, *after* transformation
- * (which might expand '*' into multiple items). The VALUES RTE
- * can't handle anything different.
+ * All the sublists must be the same length, *after*
+ * transformation (which might expand '*' into multiple items).
+ * The VALUES RTE can't handle anything different.
*/
if (sublist_length < 0)
{
/*
* There mustn't have been any table references in the expressions,
- * else strange things would happen, like Cartesian products of
- * those tables with the VALUES list ...
+ * else strange things would happen, like Cartesian products of those
+ * tables with the VALUES list ...
*/
if (pstate->p_joinlist != NIL)
ereport(ERROR,
errmsg("VALUES must not contain table references")));
/*
- * Another thing we can't currently support is NEW/OLD references
- * in rules --- seems we'd need something like SQL99's LATERAL
- * construct to ensure that the values would be available while
- * evaluating the VALUES RTE. This is a shame. FIXME
+ * Another thing we can't currently support is NEW/OLD references in
+ * rules --- seems we'd need something like SQL99's LATERAL construct
+ * to ensure that the values would be available while evaluating the
+ * VALUES RTE. This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) exprsLists, 0))
* INSERT INTO foo VALUES(bar.*)
*
* The sublist is just computed directly as the Query's targetlist,
- * with no VALUES RTE. So it works just like SELECT without FROM.
+ * with no VALUES RTE. So it works just like SELECT without FROM.
*----------
*/
List *valuesLists = selectStmt->valuesLists;
attnos = list_head(attrnos);
foreach(lc, exprList)
{
- Expr *expr = (Expr *) lfirst(lc);
+ Expr *expr = (Expr *) lfirst(lc);
ResTarget *col;
TargetEntry *tle;
}
/*
- * If we have a RETURNING clause, we need to add the target relation
- * to the query namespace before processing it, so that Var references
- * in RETURNING will work. Also, remove any namespace entries added
- * in a sub-SELECT or VALUES list.
+ * If we have a RETURNING clause, we need to add the target relation to
+ * the query namespace before processing it, so that Var references in
+ * RETURNING will work. Also, remove any namespace entries added in a
+ * sub-SELECT or VALUES list.
*/
if (stmt->returningList)
{
transformInsertRow(ParseState *pstate, List *exprlist,
List *stmtcols, List *icolumns, List *attrnos)
{
- List *result;
+ List *result;
ListCell *lc;
ListCell *icols;
ListCell *attnos;
* Check length of expr list. It must not have more expressions than
* there are target columns. We allow fewer, but only if no explicit
* columns list was given (the remaining columns are implicitly
- * defaulted). Note we must check this *after* transformation because
+ * defaulted). Note we must check this *after* transformation because
* that could expand '*' into multiple items.
*/
if (list_length(exprlist) > list_length(icolumns))
attnos = list_head(attrnos);
foreach(lc, exprlist)
{
- Expr *expr = (Expr *) lfirst(lc);
+ Expr *expr = (Expr *) lfirst(lc);
ResTarget *col;
col = (ResTarget *) lfirst(icols);
TupleConstr *constr;
AclResult aclresult;
- bool including_defaults = false;
- bool including_constraints = false;
- bool including_indexes = false;
- ListCell *elem;
+ bool including_defaults = false;
+ bool including_constraints = false;
+ bool including_indexes = false;
+ ListCell *elem;
relation = heap_openrv(inhRelation->relation, AccessShareLock);
constr = tupleDesc->constr;
foreach(elem, inhRelation->options)
+ {
+ int option = lfirst_int(elem);
+
+ switch (option)
{
- int option = lfirst_int(elem);
- switch (option)
- {
- case CREATE_TABLE_LIKE_INCLUDING_DEFAULTS:
- including_defaults = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS:
- including_defaults = false;
- break;
- case CREATE_TABLE_LIKE_INCLUDING_CONSTRAINTS:
- including_constraints = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_CONSTRAINTS:
- including_constraints = false;
- break;
- case CREATE_TABLE_LIKE_INCLUDING_INDEXES:
- including_indexes = true;
- break;
- case CREATE_TABLE_LIKE_EXCLUDING_INDEXES:
- including_indexes = false;
- break;
- default:
- elog(ERROR, "unrecognized CREATE TABLE LIKE option: %d", option);
- }
+ case CREATE_TABLE_LIKE_INCLUDING_DEFAULTS:
+ including_defaults = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS:
+ including_defaults = false;
+ break;
+ case CREATE_TABLE_LIKE_INCLUDING_CONSTRAINTS:
+ including_constraints = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_CONSTRAINTS:
+ including_constraints = false;
+ break;
+ case CREATE_TABLE_LIKE_INCLUDING_INDEXES:
+ including_indexes = true;
+ break;
+ case CREATE_TABLE_LIKE_EXCLUDING_INDEXES:
+ including_indexes = false;
+ break;
+ default:
+ elog(ERROR, "unrecognized CREATE TABLE LIKE option: %d", option);
}
+ }
if (including_indexes)
elog(ERROR, "TODO");
}
}
- if (including_constraints && tupleDesc->constr) {
- int ccnum;
+ if (including_constraints && tupleDesc->constr)
+ {
+ int ccnum;
AttrNumber *attmap = varattnos_map_schema(tupleDesc, cxt->columns);
- for(ccnum = 0; ccnum < tupleDesc->constr->num_check; ccnum++) {
- char *ccname = tupleDesc->constr->check[ccnum].ccname;
- char *ccbin = tupleDesc->constr->check[ccnum].ccbin;
- Node *ccbin_node = stringToNode(ccbin);
+ for (ccnum = 0; ccnum < tupleDesc->constr->num_check; ccnum++)
+ {
+ char *ccname = tupleDesc->constr->check[ccnum].ccname;
+ char *ccbin = tupleDesc->constr->check[ccnum].ccbin;
+ Node *ccbin_node = stringToNode(ccbin);
Constraint *n = makeNode(Constraint);
change_varattnos_of_a_node(ccbin_node, attmap);
n->raw_expr = ccbin_node;
n->cooked_expr = NULL;
n->indexspace = NULL;
- cxt->ckconstraints = lappend(cxt->ckconstraints, (Node*)n);
+ cxt->ckconstraints = lappend(cxt->ckconstraints, (Node *) n);
}
}
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in rule WHERE condition")));
+ errmsg("cannot use aggregate function in rule WHERE condition")));
/* save info about sublinks in where clause */
qry->hasSubLinks = pstate->p_hasSubLinks;
RangeTblEntry *rte;
RangeTblRef *rtr;
ListCell *lc;
- ListCell *lc2;
- int i;
+ ListCell *lc2;
+ int i;
qry->commandType = CMD_SELECT;
Assert(stmt->op == SETOP_NONE);
/*
- * For each row of VALUES, transform the raw expressions and gather
- * type information. This is also a handy place to reject DEFAULT
- * nodes, which the grammar allows for simplicity.
+ * For each row of VALUES, transform the raw expressions and gather type
+ * information. This is also a handy place to reject DEFAULT nodes, which
+ * the grammar allows for simplicity.
*/
foreach(lc, stmt->valuesLists)
{
- List *sublist = (List *) lfirst(lc);
+ List *sublist = (List *) lfirst(lc);
/* Do basic expression transformation (same as a ROW() expr) */
sublist = transformExpressionList(pstate, sublist);
/*
* All the sublists must be the same length, *after* transformation
- * (which might expand '*' into multiple items). The VALUES RTE
- * can't handle anything different.
+ * (which might expand '*' into multiple items). The VALUES RTE can't
+ * handle anything different.
*/
if (sublist_length < 0)
{
i = 0;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
if (IsA(col, SetToDefault))
ereport(ERROR,
}
/*
- * Now resolve the common types of the columns, and coerce everything
- * to those types.
+ * Now resolve the common types of the columns, and coerce everything to
+ * those types.
*/
for (i = 0; i < sublist_length; i++)
{
newExprsLists = NIL;
foreach(lc, exprsLists)
{
- List *sublist = (List *) lfirst(lc);
- List *newsublist = NIL;
+ List *sublist = (List *) lfirst(lc);
+ List *newsublist = NIL;
i = 0;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
col = coerce_to_common_type(pstate, col, coltypes[i], "VALUES");
newsublist = lappend(newsublist, col);
qry->targetList = expandRelAttrs(pstate, rte, rtr->rtindex, 0);
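A toy sketch of the per-column resolution described above, using a hypothetical three-level promotion ladder rather than the real common-type machinery: scan one VALUES column across all rows, let the most general type seen win, and then coerce every row's cell to that type.

#include <stdio.h>

/* Hypothetical promotion ladder; the real code resolves types by category. */
typedef enum
{
	T_UNKNOWN,					/* untyped string literal */
	T_INT4,
	T_INT8,
	T_NUMERIC
} ToyType;

/*
 * Resolve the common type of one VALUES column across all rows: the most
 * general type seen wins.  (In the real resolution an all-unknown column
 * falls back to text; the toy just reports T_UNKNOWN.)
 */
static ToyType
column_common_type(const ToyType *cells, int nrows)
{
	ToyType		result = T_UNKNOWN;
	int			i;

	for (i = 0; i < nrows; i++)
		if (cells[i] > result)
			result = cells[i];
	return result;
}

int
main(void)
{
	ToyType		col[] = {T_INT4, T_UNKNOWN, T_NUMERIC};

	printf("%d\n", column_common_type(col, 3));		/* 3 == T_NUMERIC */
	return 0;
}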
/*
- * The grammar allows attaching ORDER BY, LIMIT, and FOR UPDATE
- * to a VALUES, so cope.
+ * The grammar allows attaching ORDER BY, LIMIT, and FOR UPDATE to a
+ * VALUES, so cope.
*/
qry->sortClause = transformSortClause(pstate,
stmt->sortClause,
if (stmt->lockingClause)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE cannot be applied to VALUES")));
+ errmsg("SELECT FOR UPDATE/SHARE cannot be applied to VALUES")));
/* handle any CREATE TABLE AS spec */
if (stmt->into)
}
/*
- * There mustn't have been any table references in the expressions,
- * else strange things would happen, like Cartesian products of
- * those tables with the VALUES list. We have to check this after
- * parsing ORDER BY et al since those could insert more junk.
+ * There mustn't have been any table references in the expressions, else
+ * strange things would happen, like Cartesian products of those tables
+ * with the VALUES list. We have to check this after parsing ORDER BY et
+ * al since those could insert more junk.
*/
if (list_length(pstate->p_joinlist) != 1)
ereport(ERROR,
errmsg("VALUES must not contain table references")));
/*
- * Another thing we can't currently support is NEW/OLD references
- * in rules --- seems we'd need something like SQL99's LATERAL
- * construct to ensure that the values would be available while
- * evaluating the VALUES RTE. This is a shame. FIXME
+ * Another thing we can't currently support is NEW/OLD references in rules
+ * --- seems we'd need something like SQL99's LATERAL construct to ensure
+ * that the values would be available while evaluating the VALUES RTE.
+ * This is a shame. FIXME
*/
if (list_length(pstate->p_rtable) != 1 &&
contain_vars_of_level((Node *) newExprsLists, 0))
/*
* Find leftmost leaf SelectStmt; extract the one-time-only items from it
* and from the top-level node. (Most of the INTO options can be
- * transferred to the Query immediately, but intoColNames has to be
- * saved to apply below.)
+ * transferred to the Query immediately, but intoColNames has to be saved
+ * to apply below.)
*/
leftmostSelect = stmt->larg;
while (leftmostSelect && leftmostSelect->op != SETOP_NONE)
qry->hasSubLinks = pstate->p_hasSubLinks;
/*
- * Top-level aggregates are simply disallowed in UPDATE, per spec.
- * (From an implementation point of view, this is forced because the
- * implicit ctid reference would otherwise be an ungrouped variable.)
+ * Top-level aggregates are simply disallowed in UPDATE, per spec. (From
+ * an implementation point of view, this is forced because the implicit
+ * ctid reference would otherwise be an ungrouped variable.)
*/
if (pstate->p_hasAggs)
ereport(ERROR,
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
ResTarget *origTarget;
- int attrno;
+ int attrno;
if (tle->resjunk)
{
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" of relation \"%s\" does not exist",
origTarget->name,
- RelationGetRelationName(pstate->p_target_relation)),
+ RelationGetRelationName(pstate->p_target_relation)),
parser_errposition(pstate, origTarget->location)));
updateTargetListEntry(pstate, tle, origTarget->name,
return NIL; /* nothing to do */
/*
- * We need to assign resnos starting at one in the RETURNING list.
- * Save and restore the main tlist's value of p_next_resno, just in
- * case someone looks at it later (probably won't happen).
+ * We need to assign resnos starting at one in the RETURNING list. Save
+ * and restore the main tlist's value of p_next_resno, just in case
+ * someone looks at it later (probably won't happen).
*/
save_next_resno = pstate->p_next_resno;
pstate->p_next_resno = 1;
if (list_length(pstate->p_rtable) != length_rtable)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("RETURNING may not contain references to other relations")));
+ errmsg("RETURNING may not contain references to other relations")));
/* mark column origins */
markTargetListOrigins(pstate, rlist);
transformPrepareStmt(ParseState *pstate, PrepareStmt *stmt)
{
Query *result = makeNode(Query);
- List *argtype_oids; /* argtype OIDs in a list */
+ List *argtype_oids; /* argtype OIDs in a list */
Oid *argtoids = NULL; /* and as an array */
int nargs;
List *queries;
}
/*
- * Analyze the statement using these parameter types (any
- * parameters passed in from above us will not be visible to it),
- * allowing information about unknown parameters to be deduced
- * from context.
+ * Analyze the statement using these parameter types (any parameters
+ * passed in from above us will not be visible to it), allowing
+ * information about unknown parameters to be deduced from context.
*/
queries = parse_analyze_varparams((Node *) stmt->query,
pstate->p_sourcetext,
elog(ERROR, "unexpected extra stuff in prepared statement");
/*
- * Check that all parameter types were determined, and convert the
- * array of OIDs into a list for storage.
+ * Check that all parameter types were determined, and convert the array
+ * of OIDs into a list for storage.
*/
argtype_oids = NIL;
for (i = 0; i < nargs; i++)
if (qry->havingQual != NULL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE/SHARE is not allowed with HAVING clause")));
+ errmsg("SELECT FOR UPDATE/SHARE is not allowed with HAVING clause")));
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
if ((rc = get_rowmark(qry, rtindex)) != NULL)
{
/*
- * If the same RTE is specified both FOR UPDATE and FOR SHARE,
- * treat it as FOR UPDATE. (Reasonable, since you can't take
- * both a shared and exclusive lock at the same time; it'll
- * end up being exclusive anyway.)
+ * If the same RTE is specified both FOR UPDATE and FOR SHARE, treat
+ * it as FOR UPDATE. (Reasonable, since you can't take both a shared
+ * and exclusive lock at the same time; it'll end up being exclusive
+ * anyway.)
*
- * We also consider that NOWAIT wins if it's specified both ways.
- * This is a bit more debatable but raising an error doesn't
- * seem helpful. (Consider for instance SELECT FOR UPDATE NOWAIT
- * from a view that internally contains a plain FOR UPDATE spec.)
+ * We also consider that NOWAIT wins if it's specified both ways. This
+ * is a bit more debatable but raising an error doesn't seem helpful.
+ * (Consider for instance SELECT FOR UPDATE NOWAIT from a view that
+ * internally contains a plain FOR UPDATE spec.)
*/
rc->forUpdate |= forUpdate;
rc->noWait |= noWait;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.157 2006/08/14 23:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.158 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If a coldeflist was supplied, ensure it defines a legal set of names
* (no duplicates) and datatypes (no pseudo-types, for instance).
- * addRangeTableEntryForFunction looked up the type names but didn't
- * check them further than that.
+ * addRangeTableEntryForFunction looked up the type names but didn't check
+ * them further than that.
*/
if (r->coldeflist)
{
ListCell *l;
/* Preprocess the grouping clause, lookup TLEs */
- foreach (l, grouplist)
+ foreach(l, grouplist)
{
TargetEntry *tle;
- Oid restype;
+ Oid restype;
tle = findTargetlistEntry(pstate, lfirst(l),
targetlist, GROUP_CLAUSE);
}
/*
- * Now iterate through the ORDER BY clause. If we find a grouping
- * element that matches the ORDER BY element, append the grouping
- * element to the result set immediately. Otherwise, stop
- * iterating. The effect of this is to look for a prefix of the
- * ORDER BY list in the grouping clauses, and to move that prefix
- * to the front of the GROUP BY.
+ * Now iterate through the ORDER BY clause. If we find a grouping element
+ * that matches the ORDER BY element, append the grouping element to the
+ * result set immediately. Otherwise, stop iterating. The effect of this
+ * is to look for a prefix of the ORDER BY list in the grouping clauses,
+ * and to move that prefix to the front of the GROUP BY.
*/
- foreach (l, sortClause)
+ foreach(l, sortClause)
{
- SortClause *sc = (SortClause *) lfirst(l);
- ListCell *prev = NULL;
- ListCell *tl;
- bool found = false;
+ SortClause *sc = (SortClause *) lfirst(l);
+ ListCell *prev = NULL;
+ ListCell *tl;
+ bool found = false;
- foreach (tl, tle_list)
+ foreach(tl, tle_list)
{
TargetEntry *tle = (TargetEntry *) lfirst(tl);
}
/*
- * Now add any remaining elements of the GROUP BY list in the
- * order we received them.
+ * Now add any remaining elements of the GROUP BY list in the order we
+ * received them.
*
- * XXX: are there any additional criteria to consider when
- * ordering grouping clauses?
+ * XXX: are there any additional criteria to consider when ordering
+ * grouping clauses?
*/
foreach(l, tle_list)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
GroupClause *gc;
- Oid sort_op;
+ Oid sort_op;
/* avoid making duplicate grouplist entries */
if (targetIsInSortList(tle, result))
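A standalone sketch of the reordering described in the comments above, with grouping and sort items reduced to plain column numbers (a simplification; the real lists carry target-entry references and sort operators): the longest prefix of the ORDER BY list that appears in GROUP BY is emitted first, in ORDER BY order, and the remaining GROUP BY items follow in their original order.

#include <stdbool.h>
#include <stdio.h>

static int
reorder_groupby(const int *group, int ngroup,
				const int *order, int norder,
				int *out)
{
	bool		used[64] = {false};	/* toy limit: assumes ngroup <= 64 */
	int			nout = 0;
	int			i, j;

	for (i = 0; i < norder; i++)
	{
		bool		found = false;

		for (j = 0; j < ngroup; j++)
		{
			if (!used[j] && group[j] == order[i])
			{
				out[nout++] = group[j];
				used[j] = true;
				found = true;
				break;
			}
		}
		if (!found)
			break;				/* stop at the first ORDER BY item not grouped */
	}
	/* append the rest of GROUP BY in the order it was received */
	for (j = 0; j < ngroup; j++)
		if (!used[j])
			out[nout++] = group[j];
	return nout;
}

int
main(void)
{
	int			group[] = {3, 1, 2};
	int			order[] = {2, 3, 5};
	int			out[3];
	int			n = reorder_groupby(group, 3, order, 3, out);
	int			i;

	for (i = 0; i < n; i++)
		printf("%d ", out[i]);	/* prints: 2 3 1 */
	printf("\n");
	return 0;
}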
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.143 2006/07/26 19:31:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.144 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If the target type is a domain, we want to call its base type's
- * input routine, not domain_in(). This is to avoid premature
- * failure when the domain applies a typmod: existing input
- * routines follow implicit-coercion semantics for length checks,
- * which is not always what we want here. The needed check will
- * be applied properly inside coerce_to_domain().
+ * input routine, not domain_in(). This is to avoid premature failure
+ * when the domain applies a typmod: existing input routines follow
+ * implicit-coercion semantics for length checks, which is not always
+ * what we want here. The needed check will be applied properly
+ * inside coerce_to_domain().
*/
baseTypeMod = -1;
baseTypeId = getBaseTypeAndTypmod(targetTypeId, &baseTypeMod);
newcon->constisnull = con->constisnull;
/*
- * We pass typmod -1 to the input routine, primarily because
- * existing input routines follow implicit-coercion semantics for
- * length checks, which is not always what we want here. Any
- * length constraint will be applied later by our caller.
+ * We pass typmod -1 to the input routine, primarily because existing
+ * input routines follow implicit-coercion semantics for length
+ * checks, which is not always what we want here. Any length
+ * constraint will be applied later by our caller.
*
- * We assume here that UNKNOWN's internal representation is the
- * same as CSTRING.
+ * We assume here that UNKNOWN's internal representation is the same
+ * as CSTRING.
*/
if (!con->constisnull)
newcon->constvalue = stringTypeDatum(targetType,
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
/* translator: first %s is name of a SQL construct, eg LIMIT */
- errmsg("argument of %s must be type bigint, not type %s",
- constructName, format_type_be(inputTypeId))));
+ errmsg("argument of %s must be type bigint, not type %s",
+ constructName, format_type_be(inputTypeId))));
}
if (expression_returns_set(node))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.197 2006/08/12 20:05:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.198 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *typecast_expression(ParseState *pstate, Node *expr,
TypeName *typename);
static Node *make_row_comparison_op(ParseState *pstate, List *opname,
- List *largs, List *rargs, int location);
+ List *largs, List *rargs, int location);
static Node *make_row_distinct_op(ParseState *pstate, List *opname,
RowExpr *lrow, RowExpr *rrow, int location);
static Expr *make_distinct_op(ParseState *pstate, List *opname,
transformAExprOf(ParseState *pstate, A_Expr *a)
{
/*
- * Checking an expression for match to a list of type names.
- * Will result in a boolean constant node.
+ * Checking an expression for match to a list of type names. Will result
+ * in a boolean constant node.
*/
Node *lexpr = transformExpr(pstate, a->lexpr);
ListCell *telem;
}
/*
- * We have two forms: equals or not equals. Flip the sense of the result
+ * We have two forms: equals or not equals. Flip the sense of the result
* for not equals.
*/
if (strcmp(strVal(linitial(a->name)), "<>") == 0)
useOr = true;
/*
- * We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is
- * only possible if the inputs are all scalars (no RowExprs) and there
- * is a suitable array type available. If not, we fall back to a
- * boolean condition tree with multiple copies of the lefthand expression.
+ * We try to generate a ScalarArrayOpExpr from IN/NOT IN, but this is only
+ * possible if the inputs are all scalars (no RowExprs) and there is a
+ * suitable array type available. If not, we fall back to a boolean
+ * condition tree with multiple copies of the lefthand expression.
*
* First step: transform all the inputs, and detect whether any are
* RowExprs.
rexprs = NIL;
foreach(l, (List *) a->rexpr)
{
- Node *rexpr = transformExpr(pstate, lfirst(l));
+ Node *rexpr = transformExpr(pstate, lfirst(l));
haveRowExpr |= (rexpr && IsA(rexpr, RowExpr));
rexprs = lappend(rexprs, rexpr);
}
/*
- * If not forced by presence of RowExpr, try to resolve a common
- * scalar type for all the expressions, and see if it has an array type.
- * (But if there's only one righthand expression, we may as well just
- * fall through and generate a simple = comparison.)
+ * If not forced by presence of RowExpr, try to resolve a common scalar
+ * type for all the expressions, and see if it has an array type. (But if
+ * there's only one righthand expression, we may as well just fall through
+ * and generate a simple = comparison.)
*/
if (!haveRowExpr && list_length(rexprs) != 1)
{
Oid array_type;
/*
- * Select a common type for the array elements. Note that since
- * the LHS' type is first in the list, it will be preferred when
- * there is doubt (eg, when all the RHS items are unknown literals).
+ * Select a common type for the array elements. Note that since the
+ * LHS' type is first in the list, it will be preferred when there is
+ * doubt (eg, when all the RHS items are unknown literals).
*/
scalar_type = select_common_type(typeids, "IN");
if (array_type != InvalidOid)
{
/*
- * OK: coerce all the right-hand inputs to the common type
- * and build an ArrayExpr for them.
+ * OK: coerce all the right-hand inputs to the common type and
+ * build an ArrayExpr for them.
*/
List *aexprs;
ArrayExpr *newa;
!IsA(rexpr, RowExpr))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("arguments of row IN must all be row expressions"),
+ errmsg("arguments of row IN must all be row expressions"),
parser_errposition(pstate, a->location)));
cmp = make_row_comparison_op(pstate,
a->name,
- (List *) copyObject(((RowExpr *) lexpr)->args),
+ (List *) copyObject(((RowExpr *) lexpr)->args),
((RowExpr *) rexpr)->args,
a->location);
}
if (sublink->subLinkType == EXISTS_SUBLINK)
{
/*
- * EXISTS needs no test expression or combining operator.
- * These fields should be null already, but make sure.
+ * EXISTS needs no test expression or combining operator. These fields
+ * should be null already, but make sure.
*/
sublink->testexpr = NULL;
sublink->operName = NIL;
}
/*
- * EXPR and ARRAY need no test expression or combining operator.
- * These fields should be null already, but make sure.
+ * EXPR and ARRAY need no test expression or combining operator. These
+ * fields should be null already, but make sure.
*/
sublink->testexpr = NULL;
sublink->operName = NIL;
left_list = list_make1(lefthand);
/*
- * Build a list of PARAM_SUBLINK nodes representing the
- * output columns of the subquery.
+ * Build a list of PARAM_SUBLINK nodes representing the output columns
+ * of the subquery.
*/
right_list = NIL;
foreach(l, qtree->targetList)
}
/*
- * We could rely on make_row_comparison_op to complain if the
- * list lengths differ, but we prefer to generate a more specific
- * error message.
+ * We could rely on make_row_comparison_op to complain if the list
+ * lengths differ, but we prefer to generate a more specific error
+ * message.
*/
if (list_length(left_list) < list_length(right_list))
ereport(ERROR,
parser_errposition(pstate, location)));
/*
- * We can't compare zero-length rows because there is no principled
- * basis for figuring out what the operator is.
+ * We can't compare zero-length rows because there is no principled basis
+ * for figuring out what the operator is.
*/
if (nopers == 0)
ereport(ERROR,
parser_errposition(pstate, location)));
/*
- * Identify all the pairwise operators, using make_op so that
- * behavior is the same as in the simple scalar case.
+ * Identify all the pairwise operators, using make_op so that behavior is
+ * the same as in the simple scalar case.
*/
opexprs = NIL;
forboth(l, largs, r, rargs)
if (cmp->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("row comparison operator must yield type boolean, "
- "not type %s",
- format_type_be(cmp->opresulttype)),
+ errmsg("row comparison operator must yield type boolean, "
+ "not type %s",
+ format_type_be(cmp->opresulttype)),
parser_errposition(pstate, location)));
if (expression_returns_set((Node *) cmp))
ereport(ERROR,
}
/*
- * If rows are length 1, just return the single operator. In this
- * case we don't insist on identifying btree semantics for the operator
- * (but we still require it to return boolean).
+ * If rows are length 1, just return the single operator. In this case we
+ * don't insist on identifying btree semantics for the operator (but we
+ * still require it to return boolean).
*/
if (nopers == 1)
return (Node *) linitial(opexprs);
/*
* Now we must determine which row comparison semantics (= <> < <= > >=)
- * apply to this set of operators. We look for btree opclasses containing
+ * apply to this set of operators. We look for btree opclasses containing
* the operators, and see which interpretations (strategy numbers) exist
* for each operator.
*/
i = 0;
foreach(l, opexprs)
{
- Bitmapset *this_strats;
+ Bitmapset *this_strats;
ListCell *j;
get_op_btree_interpretation(((OpExpr *) lfirst(l))->opno,
&opclass_lists[i], &opstrat_lists[i]);
+
/*
- * convert strategy number list to a Bitmapset to make the intersection
- * calculation easy.
+ * convert strategy number list to a Bitmapset to make the
+ * intersection calculation easy.
*/
this_strats = NULL;
foreach(j, opstrat_lists[i])
/*
* Prefer the interpretation with the most default opclasses.
*/
- int best_defaults = 0;
- bool multiple_best = false;
- int this_rctype;
+ int best_defaults = 0;
+ bool multiple_best = false;
+ int this_rctype;
rctype = 0; /* keep compiler quiet */
while ((this_rctype = bms_first_member(strats)) >= 0)
{
- int ndefaults = 0;
+ int ndefaults = 0;
for (i = 0; i < nopers; i++)
{
forboth(l, opclass_lists[i], r, opstrat_lists[i])
{
- Oid opclass = lfirst_oid(l);
- int opstrat = lfirst_int(r);
+ Oid opclass = lfirst_oid(l);
+ int opstrat = lfirst_int(r);
if (opstrat == this_rctype &&
opclass_is_default(opclass))
}
/*
- * For = and <> cases, we just combine the pairwise operators with
- * AND or OR respectively.
+ * For = and <> cases, we just combine the pairwise operators with AND or
+ * OR respectively.
*
* Note: this is presently the only place where the parser generates
- * BoolExpr with more than two arguments. Should be OK since the
- * rest of the system thinks BoolExpr is N-argument anyway.
+ * BoolExpr with more than two arguments. Should be OK since the rest of
+ * the system thinks BoolExpr is N-argument anyway.
*/
if (rctype == ROWCOMPARE_EQ)
return (Node *) makeBoolExpr(AND_EXPR, opexprs);
return (Node *) makeBoolExpr(OR_EXPR, opexprs);
/*
- * Otherwise we need to determine exactly which opclass to associate
- * with each operator.
+ * Otherwise we need to determine exactly which opclass to associate with
+ * each operator.
*/
opclasses = NIL;
for (i = 0; i < nopers; i++)
{
- Oid best_opclass = 0;
- int ndefault = 0;
- int nmatch = 0;
+ Oid best_opclass = 0;
+ int ndefault = 0;
+ int nmatch = 0;
forboth(l, opclass_lists[i], r, opstrat_lists[i])
{
- Oid opclass = lfirst_oid(l);
- int opstrat = lfirst_int(r);
+ Oid opclass = lfirst_oid(l);
+ int opstrat = lfirst_int(r);
if (opstrat == rctype)
{
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("could not determine interpretation of row comparison operator %s",
strVal(llast(opname))),
- errdetail("There are multiple equally-plausible candidates."),
+ errdetail("There are multiple equally-plausible candidates."),
parser_errposition(pstate, location)));
}
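A minimal sketch of the strategy-intersection step described above, with each column operator's candidate interpretations reduced to a plain bitmask (illustrative codes, not the real btree strategy numbers): the per-column masks are ANDed together, and only the surviving bits are interpretations consistent with every column. A tie among survivors would then be broken by counting default opclasses, as the error path above notes.

#include <stdio.h>

/* Toy strategy numbers, one bit each: <, <=, =, >=, >, <> */
enum
{
	STRAT_LT = 1 << 0,
	STRAT_LE = 1 << 1,
	STRAT_EQ = 1 << 2,
	STRAT_GE = 1 << 3,
	STRAT_GT = 1 << 4,
	STRAT_NE = 1 << 5
};

/*
 * Intersect the candidate-strategy bitmasks of every column operator.
 * Returns 0 if the operators admit no common row-comparison interpretation.
 */
static unsigned
common_strategies(const unsigned *per_column, int ncolumns)
{
	unsigned	strats = ~0u;
	int			i;

	for (i = 0; i < ncolumns; i++)
		strats &= per_column[i];
	return strats;
}

int
main(void)
{
	/* column 1's operator could be < or <=; column 2's could be < or <> */
	unsigned	cols[] = {STRAT_LT | STRAT_LE, STRAT_LT | STRAT_NE};

	printf("%#x\n", common_strategies(cols, 2));	/* 0x1 == STRAT_LT */
	return 0;
}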
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean"),
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean"),
parser_errposition(pstate, location)));
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.189 2006/07/27 19:52:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.190 2006/10/04 00:29:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *ParseComplexProjection(ParseState *pstate, char *funcname,
Node *first_arg, int location);
static void unknown_attribute(ParseState *pstate, Node *relref, char *attname,
- int location);
+ int location);
/*
/*
* Reject attempt to call a parameterless aggregate without (*)
- * syntax. This is mere pedantry but some folks insisted ...
+ * syntax. This is mere pedantry but some folks insisted ...
*/
if (fargs == NIL && !agg_star)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("%s(*) must be used to call a parameterless aggregate function",
- NameListToString(funcname)),
+ errmsg("%s(*) must be used to call a parameterless aggregate function",
+ NameListToString(funcname)),
parser_errposition(pstate, location)));
/* parse_agg.c does additional aggregate-specific processing */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.94 2006/08/02 01:59:47 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.95 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* is a dummy (always 0, in fact).
*
* The locations stored in raw parsetrees are byte offsets into the source
- * string. We have to convert them to 1-based character indexes for reporting
- * to clients. (We do things this way to avoid unnecessary overhead in the
+ * string. We have to convert them to 1-based character indexes for reporting
+ * to clients. (We do things this way to avoid unnecessary overhead in the
* normal non-error case: computing character indexes would be much more
* expensive than storing token offsets.)
*/
int
parser_errposition(ParseState *pstate, int location)
{
- int pos;
+ int pos;
/* No-op if location was not provided */
if (location < 0)
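A self-contained sketch of the byte-offset-to-character-index conversion described above, assuming the source text is UTF-8 (the server actually does this with its multibyte routines for whatever the database encoding is): continuation bytes are skipped, so only character start bytes are counted, and the result is 1-based.

#include <stdio.h>

static int
byte_offset_to_charindex(const char *src, int byte_offset)
{
	int			chars = 0;
	int			i;

	for (i = 0; i < byte_offset && src[i] != '\0'; i++)
	{
		/* count only bytes that start a character (not 10xxxxxx continuations) */
		if (((unsigned char) src[i] & 0xC0) != 0x80)
			chars++;
	}
	return chars + 1;			/* 1-based, pointing at the errant token */
}

int
main(void)
{
	const char *query = "SELECT 'caf\xc3\xa9' FRM t";	/* bogus "FRM" starts at byte 15 */

	printf("%d\n", byte_offset_to_charindex(query, 15));	/* prints 15: the accented
															 * character used two bytes */
	return 0;
}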
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.89 2006/07/14 14:52:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.90 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/typcache.h"
-static Oid binary_oper_exact(List *opname, Oid arg1, Oid arg2);
+static Oid binary_oper_exact(List *opname, Oid arg1, Oid arg2);
static FuncDetailCode oper_select_candidate(int nargs,
Oid *input_typeids,
FuncCandidateList candidates,
static const char *op_signature_string(List *op, char oprkind,
Oid arg1, Oid arg2);
static void op_error(ParseState *pstate, List *op, char oprkind,
- Oid arg1, Oid arg2,
- FuncDetailCode fdresult, int location);
+ Oid arg1, Oid arg2,
+ FuncDetailCode fdresult, int location);
static Expr *make_op_expr(ParseState *pstate, Operator op,
Node *ltree, Node *rtree,
Oid ltypeId, Oid rtypeId);
if (clist != NULL)
{
/*
- * The returned list has args in the form (0, oprright).
- * Move the useful data into args[0] to keep oper_select_candidate
- * simple. XXX we are assuming here that we may scribble on the
- * list!
+ * The returned list has args in the form (0, oprright). Move the
+ * useful data into args[0] to keep oper_select_candidate simple.
+ * XXX we are assuming here that we may scribble on the list!
*/
FuncCandidateList clisti;
if (!OidIsValid(rtypeId))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires array on right side"),
+ errmsg("op ANY/ALL (array) requires array on right side"),
parser_errposition(pstate, location)));
}
if (rettype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator to yield boolean"),
+ errmsg("op ANY/ALL (array) requires operator to yield boolean"),
parser_errposition(pstate, location)));
if (get_func_retset(opform->oprcode))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("op ANY/ALL (array) requires operator not to return a set"),
+ errmsg("op ANY/ALL (array) requires operator not to return a set"),
parser_errposition(pstate, location)));
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.124 2006/08/02 01:59:47 joe Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.125 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List **colnames, List **colvars);
static int specialAttNum(const char *attname);
static void warnAutoRange(ParseState *pstate, RangeVar *relation,
- int location);
+ int location);
/*
numaliases = list_length(eref->colnames);
while (numaliases < numcolumns)
{
- char attrname[64];
+ char attrname[64];
numaliases++;
snprintf(attrname, sizeof(attrname), "column%d", numaliases);
/* issue warning or error as needed */
warnAutoRange(pstate, relation, location);
+
/*
* Note that we set inFromCl true, so that the RTE will be listed
* explicitly if the parsetree is ever decompiled by ruleutils.c. This
varattno = 0;
foreach(lc, (List *) linitial(rte->values_lists))
{
- Node *col = (Node *) lfirst(lc);
+ Node *col = (Node *) lfirst(lc);
varattno++;
if (colnames)
case RTE_VALUES:
{
/* Values RTE --- get type info from first sublist */
- List *collist = (List *) linitial(rte->values_lists);
+ List *collist = (List *) linitial(rte->values_lists);
Node *col;
if (attnum < 1 || attnum > list_length(collist))
elog(ERROR, "values list %s does not have attribute %d",
rte->eref->aliasname, attnum);
- col = (Node *) list_nth(collist, attnum-1);
+ col = (Node *) list_nth(collist, attnum - 1);
*vartype = exprType(col);
*vartypmod = exprTypmod(col);
}
/*
* Check to see if there are any potential matches in the query's
- * rangetable. This affects the message we provide.
+ * rangetable. This affects the message we provide.
*/
rte = searchRangeTable(pstate, relation);
/*
- * If we found a match that has an alias and the alias is visible in
- * the namespace, then the problem is probably use of the relation's
- * real name instead of its alias, ie "SELECT foo.* FROM foo f".
- * This mistake is common enough to justify a specific hint.
+ * If we found a match that has an alias and the alias is visible in the
+ * namespace, then the problem is probably use of the relation's real name
+ * instead of its alias, ie "SELECT foo.* FROM foo f". This mistake is
+ * common enough to justify a specific hint.
*
* If we found a match that doesn't meet those criteria, assume the
* problem is illegal use of a relation outside its scope, as in the
if (rte)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("invalid reference to FROM-clause entry for table \"%s\"",
- relation->relname),
+ errmsg("invalid reference to FROM-clause entry for table \"%s\"",
+ relation->relname),
(badAlias ?
- errhint("Perhaps you meant to reference the table alias \"%s\".",
- badAlias) :
+ errhint("Perhaps you meant to reference the table alias \"%s\".",
+ badAlias) :
errhint("There is an entry for table \"%s\", but it cannot be referenced from this part of the query.",
rte->eref->aliasname)),
parser_errposition(pstate, location)));
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
(pstate->parentParseState ?
- errmsg("missing FROM-clause entry in subquery for table \"%s\"",
- relation->relname) :
+ errmsg("missing FROM-clause entry in subquery for table \"%s\"",
+ relation->relname) :
errmsg("missing FROM-clause entry for table \"%s\"",
relation->relname)),
parser_errposition(pstate, location)));
errmsg("adding missing FROM-clause entry for table \"%s\"",
relation->relname)),
(badAlias ?
- errhint("Perhaps you meant to reference the table alias \"%s\".",
- badAlias) :
+ errhint("Perhaps you meant to reference the table alias \"%s\".",
+ badAlias) :
(rte ?
errhint("There is an entry for table \"%s\", but it cannot be referenced from this part of the query.",
rte->eref->aliasname) : 0)),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.148 2006/08/14 23:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.149 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* This is the identical transformation to transformTargetList, except that
* the input list elements are bare expressions without ResTarget decoration,
* and the output elements are likewise just expressions without TargetEntry
- * decoration. We use this for ROW() and VALUES() constructs.
+ * decoration. We use this for ROW() and VALUES() constructs.
*/
List *
transformExpressionList(ParseState *pstate, List *exprlist)
colname,
format_type_be(attrtype),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
}
/*
* updateTargetListEntry()
- * This is used in UPDATE statements only. It prepares an UPDATE
+ * This is used in UPDATE statements only. It prepares an UPDATE
* TargetEntry for assignment to a column of the target table.
* This includes coercing the given value to the target column's type
* (if necessary), and dealing with any subfield names or subscripts
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
else
ereport(ERROR,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression."),
+ errhint("You will need to rewrite or cast the expression."),
parser_errposition(pstate, location)));
}
if (attrno == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- name,
- RelationGetRelationName(pstate->p_target_relation)),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ name,
+ RelationGetRelationName(pstate->p_target_relation)),
parser_errposition(pstate, col->location)));
/*
*
* (e.g., SELECT * FROM emp, dept)
*
- * Since the grammar only accepts bare '*' at top level of SELECT,
- * we need not handle the targetlist==false case here.
+ * Since the grammar only accepts bare '*' at top level of SELECT, we
+ * need not handle the targetlist==false case here.
*/
Assert(targetlist);
return expandRelAttrs(pstate, rte, rtindex, sublevels_up);
else
{
- List *vars;
+ List *vars;
expandRTE(rte, rtindex, sublevels_up, false,
NULL, &vars);
/*
* This case should not occur: a column of a table or values list
- * shouldn't have type RECORD. Fall through and fail
- * (most likely) at the bottom.
+ * shouldn't have type RECORD. Fall through and fail (most
+ * likely) at the bottom.
*/
break;
case RTE_SUBQUERY:
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.84 2006/09/25 15:17:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.85 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
initStringInfo(&string);
foreach(l, typenames)
{
- TypeName *typename = (TypeName *) lfirst(l);
+ TypeName *typename = (TypeName *) lfirst(l);
Assert(IsA(typename, TypeName));
if (l != list_head(typenames))
/*
* Given a type structure and a string, returns the internal representation
- * of that string. The "string" can be NULL to perform conversion of a NULL
+ * of that string. The "string" can be NULL to perform conversion of a NULL
* (which might result in failure, if the input function rejects NULLs).
*/
Datum
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.67 2006/07/15 03:35:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parser.c,v 1.68 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Intermediate filter between parser and base lexer (base_yylex in scan.l).
*
* The filter is needed because in some cases the standard SQL grammar
- * requires more than one token lookahead. We reduce these cases to one-token
+ * requires more than one token lookahead. We reduce these cases to one-token
* lookahead by combining tokens here, in order to keep the grammar LALR(1).
*
* Using a filter is simpler than trying to recognize multiword tokens
switch (cur_token)
{
case WITH:
+
/*
* WITH CASCADED, LOCAL, or CHECK must be reduced to one token
*
- * XXX an alternative way is to recognize just WITH_TIME and
- * put the ugliness into the datetime datatype productions
- * instead of WITH CHECK OPTION. However that requires promoting
- * WITH to a fully reserved word. If we ever have to do that
- * anyway (perhaps for SQL99 recursive queries), come back and
- * simplify this code.
+ * XXX an alternative way is to recognize just WITH_TIME and put
+ * the ugliness into the datetime datatype productions instead of
+ * WITH CHECK OPTION. However that requires promoting WITH to a
+ * fully reserved word. If we ever have to do that anyway
+ * (perhaps for SQL99 recursive queries), come back and simplify
+ * this code.
*/
lookahead_token = base_yylex();
switch (lookahead_token)
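The combined-token trick above is easier to follow with the push-back mechanics spelled out. Below is a minimal stand-alone sketch in plain C; the token names, base_lex() and filter_lex() are invented stand-ins for the real filter that wraps base_yylex(), and only the one-token lookahead idea is the point.

/* Sketch: reduce "WITH CASCADED/LOCAL/CHECK" to a single token so the
 * grammar only ever needs one token of lookahead. */
#include <stdio.h>

enum { T_EOF, T_WITH, T_CASCADED, T_LOCAL, T_CHECK, T_IDENT,
       T_WITH_CASCADED, T_WITH_LOCAL, T_WITH_CHECK };

static const int input[] = { T_WITH, T_CASCADED, T_IDENT, T_WITH, T_IDENT, T_EOF };
static int pos = 0;
static int pushed_back = -1;	/* one saved lookahead token */

static int
base_lex(void)			/* stands in for base_yylex() */
{
	return input[pos++];
}

static int
filter_lex(void)
{
	int		cur;

	if (pushed_back >= 0)	/* return token saved by a previous call */
	{
		cur = pushed_back;
		pushed_back = -1;
		return cur;
	}

	cur = base_lex();
	if (cur == T_WITH)
	{
		int		ahead = base_lex();	/* the single lookahead token */

		switch (ahead)
		{
			case T_CASCADED:
				return T_WITH_CASCADED;
			case T_LOCAL:
				return T_WITH_LOCAL;
			case T_CHECK:
				return T_WITH_CHECK;
			default:
				pushed_back = ahead;	/* not a combination; replay it later */
				return T_WITH;
		}
	}
	return cur;
}

int
main(void)
{
	int		tok;

	while ((tok = filter_lex()) != T_EOF)
		printf("token %d\n", tok);
	return 0;
}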
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.12 2006/07/29 19:55:18 adunstan Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.13 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(errmsg_internal("Failed to reset socket waiting event: %i", (int) GetLastError())));
/*
- * make sure we don't multiplex this kernel event object with a different
+ * make sure we don't multiplex this kernel event object with a different
* socket from a previous call
*/
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.12 2006/08/09 21:18:13 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.13 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/* WaitForSingleObjectEx() uses milliseconds, round up */
waittime = (timerCommArea.value.it_value.tv_usec + 999) / 1000 +
- timerCommArea.value.it_value.tv_sec * 1000;
+ timerCommArea.value.it_value.tv_sec * 1000;
}
ResetEvent(timerCommArea.event);
LeaveCriticalSection(&timerCommArea.crit_sec);
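As a quick sanity check of the rounding above, this throwaway program (plain C, sample values only, no Win32 calls) shows that any nonzero microsecond remainder rounds the wait up to the next whole millisecond, which is what WaitForSingleObjectEx() needs:

#include <stdio.h>

int
main(void)
{
	long	tv_sec = 2;		/* sample remaining time: 2.000001 s */
	long	tv_usec = 1;
	long	waittime = (tv_usec + 999) / 1000 + tv_sec * 1000;

	/* prints 2001: the single leftover microsecond bumps the wait by 1 ms */
	printf("%ld ms\n", waittime);
	return 0;
}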
* to handle the timer setting and notification upon timeout.
*/
int
-setitimer(int which, const struct itimerval *value, struct itimerval *ovalue)
+setitimer(int which, const struct itimerval * value, struct itimerval * ovalue)
{
Assert(value != NULL);
Assert(value->it_interval.tv_sec == 0 && value->it_interval.tv_usec == 0);
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32_sema.c,v 1.2 2006/07/16 02:44:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32_sema.c,v 1.3 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* so the semaphores are automatically freed when the last referencing
* process exits.
*/
-void PGReserveSemaphores(int maxSemas, int port)
+void
+PGReserveSemaphores(int maxSemas, int port)
{
- mySemSet = (HANDLE *)malloc(maxSemas * sizeof(HANDLE));
+ mySemSet = (HANDLE *) malloc(maxSemas * sizeof(HANDLE));
if (mySemSet == NULL)
elog(PANIC, "out of memory");
numSems = 0;
*
* Initialize a PGSemaphore structure to represent a sema with count 1
*/
-void PGSemaphoreCreate(PGSemaphore sema)
+void
+PGSemaphoreCreate(PGSemaphore sema)
{
HANDLE cur_handle;
SECURITY_ATTRIBUTES sec_attrs;
}
else
ereport(PANIC,
- (errmsg("could not create semaphore: error code %d", (int)GetLastError())));
+ (errmsg("could not create semaphore: error code %d", (int) GetLastError())));
}
/*
*
* Reset a previously-initialized PGSemaphore to have count 0
*/
-void PGSemaphoreReset(PGSemaphore sema)
+void
+PGSemaphoreReset(PGSemaphore sema)
{
/*
* There's no direct API for this in Win32, so we have to ratchet the
* Lock a semaphore (decrement count), blocking if count would be < 0.
* Serve the interrupt if interruptOK is true.
*/
-void PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
+void
+PGSemaphoreLock(PGSemaphore sema, bool interruptOK)
{
DWORD ret;
HANDLE wh[2];
*
* Unlock a semaphore (increment count)
*/
-void PGSemaphoreUnlock(PGSemaphore sema)
+void
+PGSemaphoreUnlock(PGSemaphore sema)
{
if (!ReleaseSemaphore(*sema, 1, NULL))
ereport(FATAL,
*
* Lock a semaphore only if able to do so without blocking
*/
-bool PGSemaphoreTryLock(PGSemaphore sema)
+bool
+PGSemaphoreTryLock(PGSemaphore sema)
{
DWORD ret;
/* Otherwise we are in trouble */
ereport(FATAL,
(errmsg("could not try-lock semaphore: error code %d", (int) GetLastError())));
-
+
/* keep compiler quiet */
return false;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.26 2006/07/31 20:09:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/autovacuum.c,v 1.27 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
BaseInit();
/*
- * Create a per-backend PGPROC struct in shared memory, except in
- * the EXEC_BACKEND case where this was done in SubPostmasterMain.
- * We must do this before we can use LWLocks (and in the EXEC_BACKEND
- * case we already had to do some stuff with LWLocks).
+ * Create a per-backend PGPROC struct in shared memory, except in the
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifndef EXEC_BACKEND
InitProcess();
PG_SETMASK(&UnBlockSig);
/*
- * Force zero_damaged_pages OFF in the autovac process, even if it is
- * set in postgresql.conf. We don't really want such a dangerous option
- * being applied non-interactively.
+ * Force zero_damaged_pages OFF in the autovac process, even if it is set
+ * in postgresql.conf. We don't really want such a dangerous option being
+ * applied non-interactively.
*/
SetConfigOption("zero_damaged_pages", "false", PGC_SUSET, PGC_S_OVERRIDE);
}
/*
- * Otherwise, skip a database with no pgstat entry; it means it hasn't
- * seen any activity.
+ * Otherwise, skip a database with no pgstat entry; it means it
+ * hasn't seen any activity.
*/
tmp->entry = pgstat_fetch_stat_dbentry(tmp->oid);
if (!tmp->entry)
* backend signalled the postmaster. Pick up the database with the
* greatest age, and apply a database-wide vacuum on it.
*/
- int32 oldest = 0;
+ int32 oldest = 0;
whole_db = true;
foreach(cell, dblist)
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
- /* functions in indexes may want a snapshot set */
+ /* functions in indexes may want a snapshot set */
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
/* Start a transaction so our commands have one to play into. */
StartTransactionCommand();
- /* functions in indexes may want a snapshot set */
+ /* functions in indexes may want a snapshot set */
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
/*
- * Clean up any dead statistics collector entries for this DB.
- * We always want to do this exactly once per DB-processing cycle,
- * even if we find nothing worth vacuuming in the database.
+ * Clean up any dead statistics collector entries for this DB. We always
+ * want to do this exactly once per DB-processing cycle, even if we find
+ * nothing worth vacuuming in the database.
*/
pgstat_vacuum_tabstat();
/*
* autovac_report_activity
- * Report to pgstat what autovacuum is doing
+ * Report to pgstat what autovacuum is doing
*
* We send a SQL string corresponding to what the user would see if the
* equivalent command was to be issued manually.
char *nspname = get_namespace_name(get_rel_namespace(relid));
/*
- * Paranoia is appropriate here in case relation was recently
- * dropped --- the lsyscache routines we just invoked will return
- * NULL rather than failing.
+ * Paranoia is appropriate here in case relation was recently dropped
+ * --- the lsyscache routines we just invoked will return NULL rather
+ * than failing.
*/
if (relname && nspname)
{
- int len = strlen(activity);
+ int len = strlen(activity);
snprintf(activity + len, MAX_AUTOVAC_ACTIV_LEN - len,
" %s.%s", nspname, relname);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.27 2006/08/17 23:04:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.28 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#endif
/*
- * Initialize so that first time-driven event happens at the correct
- * time.
+ * Initialize so that first time-driven event happens at the correct time.
*/
last_checkpoint_time = last_xlog_switch_time = time(NULL);
/*
- * Create a resource owner to keep track of our resources (currently
- * only buffer pins).
+ * Create a resource owner to keep track of our resources (currently only
+ * buffer pins).
*/
CurrentResourceOwner = ResourceOwnerCreate(NULL, "Background Writer");
BgBufferSync();
/*
- * Check for archive_timeout, if so, switch xlog files. First
- * we do a quick check using possibly-stale local state.
+ * Check for archive_timeout, if so, switch xlog files. First we do a
+ * quick check using possibly-stale local state.
*/
if (XLogArchiveTimeout > 0 &&
(int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
{
/*
- * Update local state ... note that last_xlog_switch_time is
- * the last time a switch was performed *or requested*.
+ * Update local state ... note that last_xlog_switch_time is the
+ * last time a switch was performed *or requested*.
*/
- time_t last_time = GetLastSegSwitchTime();
+ time_t last_time = GetLastSegSwitchTime();
last_xlog_switch_time = Max(last_xlog_switch_time, last_time);
/* Now we can do the real check */
if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
{
- XLogRecPtr switchpoint;
+ XLogRecPtr switchpoint;
/* OK, it's time to switch */
switchpoint = RequestXLogSwitch();
ereport(DEBUG1,
(errmsg("xlog switch forced (archive_timeout=%d)",
XLogArchiveTimeout)));
+
/*
- * Update state in any case, so we don't retry constantly
- * when the system is idle.
+ * Update state in any case, so we don't retry constantly when
+ * the system is idle.
*/
last_xlog_switch_time = now;
}
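The archive_timeout handling above follows a cheap-check-then-recheck pattern: test against a possibly-stale local copy first, refresh from the authoritative source only when the cheap test fires, then re-test before acting. A stand-alone sketch of that shape, where fetch_shared_switch_time() is an invented stand-in for GetLastSegSwitchTime():

#include <stdio.h>
#include <time.h>

static time_t last_switch_time;	/* local, possibly stale copy */

static time_t
fetch_shared_switch_time(void)	/* stands in for GetLastSegSwitchTime() */
{
	return last_switch_time;	/* pretend shared state agrees here */
}

static int
switch_due(time_t now, int archive_timeout)
{
	time_t		shared;

	if (archive_timeout <= 0)
		return 0;

	/* quick check against the stale copy; cheap, may be a false positive */
	if ((int) (now - last_switch_time) < archive_timeout)
		return 0;

	/* refresh from the authoritative source, then do the real check */
	shared = fetch_shared_switch_time();
	if (shared > last_switch_time)
		last_switch_time = shared;
	return (int) (now - last_switch_time) >= archive_timeout;
}

int
main(void)
{
	last_switch_time = time(NULL) - 120;
	printf("switch due: %d\n", switch_due(time(NULL), 60));	/* prints 1 */
	return 0;
}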
(bgwriter_lru_percent > 0.0 && bgwriter_lru_maxpages > 0))
udelay = BgWriterDelay * 1000L;
else if (XLogArchiveTimeout > 0)
- udelay = 1000000L; /* One second */
+ udelay = 1000000L; /* One second */
else
- udelay = 10000000L; /* Ten seconds */
+ udelay = 10000000L; /* Ten seconds */
while (udelay > 999999L)
{
*
* Copyright (c) 2001-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.138 2006/08/28 19:38:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.139 2006/10/04 00:29:56 momjian Exp $
* ----------
*/
#include "postgres.h"
static PgBackendStatus *localBackendStatusTable = NULL;
static int localNumBackends = 0;
-static volatile bool need_exit = false;
-static volatile bool need_statwrite = false;
+static volatile bool need_exit = false;
+static volatile bool need_statwrite = false;
/* ----------
char test_byte;
int sel_res;
int tries = 0;
-
+
#define TESTBYTEVAL ((char) 199)
/*
* Force start of collector daemon if something to collect. Note that
- * pgstat_collect_querystring is now an independent facility that does
- * not require the collector daemon.
+ * pgstat_collect_querystring is now an independent facility that does not
+ * require the collector daemon.
*/
if (pgstat_collect_tuplelevel ||
pgstat_collect_blocklevel)
if (++tries > 1)
ereport(LOG,
- (errmsg("trying another address for the statistics collector")));
-
+ (errmsg("trying another address for the statistics collector")));
+
/*
* Create the socket.
*/
return postmaster_forkexec(ac, av);
}
-
#endif /* EXEC_BACKEND */
msg.m_tableid[0] = relid;
msg.m_nentries = 1;
- len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) + sizeof(Oid);
+ len = offsetof(PgStat_MsgTabpurge, m_tableid[0]) +sizeof(Oid);
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_TABPURGE);
msg.m_databaseid = MyDatabaseId;
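The length computation above sends only the used prefix of a fixed-capacity message rather than the whole struct. A stand-alone illustration; the Msg struct and its fields are invented for the example, and only the offsetof idiom is the point:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Oid;

typedef struct
{
	int		m_nentries;
	Oid		m_tableid[100];		/* fixed-capacity array, partially used */
} Msg;

int
main(void)
{
	Msg		msg;
	size_t	len;

	msg.m_nentries = 1;
	msg.m_tableid[0] = 42;
	len = offsetof(Msg, m_tableid[0]) + msg.m_nentries * sizeof(Oid);
	printf("send %zu of %zu bytes\n", len, sizeof(msg));
	return 0;
}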
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
msg.m_analyze = analyze;
- msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
msg.m_vacuumtime = GetCurrentTimestamp();
msg.m_tuples = tuples;
pgstat_send(&msg, sizeof(msg));
pgstat_setheader(&msg.m_hdr, PGSTAT_MTYPE_ANALYZE);
msg.m_databaseid = shared ? InvalidOid : MyDatabaseId;
msg.m_tableoid = tableoid;
- msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
+ msg.m_autovacuum = IsAutoVacuumProcess(); /* is this autovacuum? */
msg.m_analyzetime = GetCurrentTimestamp();
msg.m_live_tuples = livetuples;
msg.m_dead_tuples = deadtuples;
void
pgstat_count_xact_commit(void)
{
- if (!pgstat_collect_tuplelevel &&
- !pgstat_collect_blocklevel)
+ if (!pgstat_collect_tuplelevel &&
+ !pgstat_collect_blocklevel)
return;
pgStatXactCommit++;
void
pgstat_count_xact_rollback(void)
{
- if (!pgstat_collect_tuplelevel &&
- !pgstat_collect_blocklevel)
+ if (!pgstat_collect_tuplelevel &&
+ !pgstat_collect_blocklevel)
return;
pgStatXactRollback++;
MyBEEntry = &BackendStatusArray[MyBackendId - 1];
/*
- * To minimize the time spent modifying the entry, fetch all the
- * needed data first.
+ * To minimize the time spent modifying the entry, fetch all the needed
+ * data first.
*
* If we have a MyProcPort, use its session start time (for consistency,
* and to save a kernel call).
/*
* Initialize my status entry, following the protocol of bumping
- * st_changecount before and after; and make sure it's even afterwards.
- * We use a volatile pointer here to ensure the compiler doesn't try to
- * get cute.
+ * st_changecount before and after; and make sure it's even afterwards. We
+ * use a volatile pointer here to ensure the compiler doesn't try to get
+ * cute.
*/
beentry = MyBEEntry;
- do {
+ do
+ {
beentry->st_changecount++;
} while ((beentry->st_changecount & 1) == 0);
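The st_changecount scheme used here and in the reader code further below is a seqlock-style protocol: the writer makes the counter odd while the entry is inconsistent and even again when it is done, and a reader retries until it sees the same even value before and after its copy. A simplified single-threaded sketch with an invented Entry type; the real code additionally relies on volatile pointers, which this omits:

#include <stdio.h>
#include <string.h>

typedef struct
{
	int		changecount;	/* odd while an update is in progress */
	char	activity[64];
} Entry;

static void
writer_update(Entry *e, const char *s)
{
	e->changecount++;		/* now odd: readers must retry */
	strncpy(e->activity, s, sizeof(e->activity) - 1);
	e->activity[sizeof(e->activity) - 1] = '\0';
	e->changecount++;		/* even again: entry is consistent */
}

static void
reader_copy(const Entry *e, Entry *out)
{
	for (;;)
	{
		int		before = e->changecount;

		memcpy(out, e, sizeof(Entry));
		/* accept the copy only if the count is even and unchanged */
		if ((before & 1) == 0 && before == e->changecount)
			break;
	}
}

int
main(void)
{
	Entry	shared = {0, ""};
	Entry	local;

	writer_update(&shared, "SELECT 1");
	reader_copy(&shared, &local);
	printf("%s (count=%d)\n", local.activity, local.changecount);
	return 0;
}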
pgstat_report_tabstat();
/*
- * Clear my status entry, following the protocol of bumping
- * st_changecount before and after. We use a volatile pointer here
- * to ensure the compiler doesn't try to get cute.
+ * Clear my status entry, following the protocol of bumping st_changecount
+ * before and after. We use a volatile pointer here to ensure the
+ * compiler doesn't try to get cute.
*/
beentry->st_changecount++;
return;
/*
- * To minimize the time spent modifying the entry, fetch all the
- * needed data first.
+ * To minimize the time spent modifying the entry, fetch all the needed
+ * data first.
*/
start_timestamp = GetCurrentStatementStartTimestamp();
/*
* Update my status entry, following the protocol of bumping
- * st_changecount before and after. We use a volatile pointer here
- * to ensure the compiler doesn't try to get cute.
+ * st_changecount before and after. We use a volatile pointer here to
+ * ensure the compiler doesn't try to get cute.
*/
beentry->st_changecount++;
for (i = 1; i <= MaxBackends; i++)
{
/*
- * Follow the protocol of retrying if st_changecount changes while
- * we copy the entry, or if it's odd. (The check for odd is needed
- * to cover the case where we are able to completely copy the entry
- * while the source backend is between increment steps.) We use a
- * volatile pointer here to ensure the compiler doesn't try to get
- * cute.
+ * Follow the protocol of retrying if st_changecount changes while we
+ * copy the entry, or if it's odd. (The check for odd is needed to
+ * cover the case where we are able to completely copy the entry while
+ * the source backend is between increment steps.) We use a volatile
+ * pointer here to ensure the compiler doesn't try to get cute.
*/
for (;;)
{
- int save_changecount = beentry->st_changecount;
+ int save_changecount = beentry->st_changecount;
/*
- * XXX if PGBE_ACTIVITY_SIZE is really large, it might be best
- * to use strcpy not memcpy for copying the activity string?
+ * XXX if PGBE_ACTIVITY_SIZE is really large, it might be best to
+ * use strcpy not memcpy for copying the activity string?
*/
memcpy(localentry, (char *) beentry, sizeof(PgBackendStatus));
/* ----------
* PgstatCollectorMain() -
*
- * Start up the statistics collector process. This is the body of the
+ * Start up the statistics collector process. This is the body of the
* postmaster child process.
*
* The argc/argv parameters are valid only in EXEC_BACKEND case.
bool need_timer = false;
int len;
PgStat_Msg msg;
+
#ifdef HAVE_POLL
struct pollfd input_fd;
#else
pgstat_read_statsfile(&pgStatDBHash, InvalidOid);
/*
- * Setup the descriptor set for select(2). Since only one bit in the
- * set ever changes, we need not repeat FD_ZERO each time.
+ * Setup the descriptor set for select(2). Since only one bit in the set
+ * ever changes, we need not repeat FD_ZERO each time.
*/
#ifndef HAVE_POLL
FD_ZERO(&rfds);
* Loop to process messages until we get SIGQUIT or detect ungraceful
* death of our parent postmaster.
*
- * For performance reasons, we don't want to do a PostmasterIsAlive()
- * test after every message; instead, do it at statwrite time and if
+ * For performance reasons, we don't want to do a PostmasterIsAlive() test
+ * after every message; instead, do it at statwrite time and if
* select()/poll() is interrupted by timeout.
*/
for (;;)
{
- int got_data;
+ int got_data;
/*
* Quit if we get SIGQUIT from the postmaster.
break;
/*
- * If time to write the stats file, do so. Note that the alarm
+ * If time to write the stats file, do so. Note that the alarm
* interrupt isn't re-enabled immediately, but only after we next
* receive a stats message; so no cycles are wasted when there is
* nothing going on.
* Wait for a message to arrive; but not for more than
* PGSTAT_SELECT_TIMEOUT seconds. (This determines how quickly we will
* shut down after an ungraceful postmaster termination; so it needn't
- * be very fast. However, on some systems SIGQUIT won't interrupt
- * the poll/select call, so this also limits speed of response to
- * SIGQUIT, which is more important.)
+ * be very fast. However, on some systems SIGQUIT won't interrupt the
+ * poll/select call, so this also limits speed of response to SIGQUIT,
+ * which is more important.)
*
* We use poll(2) if available, otherwise select(2)
*/
}
got_data = (input_fd.revents != 0);
-
#else /* !HAVE_POLL */
FD_SET(pgStatSock, &rfds);
}
got_data = FD_ISSET(pgStatSock, &rfds);
-
#endif /* HAVE_POLL */
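The wait step described above reduces, on platforms that have poll(2), to a single poll() call with a timeout; the select(2) branch is equivalent. A stand-alone sketch that waits on stdin instead of the collector's UDP socket, with a shorter timeout than PGSTAT_SELECT_TIMEOUT:

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct pollfd input_fd;
	int		rc;

	input_fd.fd = STDIN_FILENO;
	input_fd.events = POLLIN | POLLERR;
	input_fd.revents = 0;

	rc = poll(&input_fd, 1, 2000);	/* wait at most 2000 ms */

	if (rc < 0)
		perror("poll");
	else if (rc == 0)
		printf("timeout elapsed, no data\n");	/* check parent liveness here */
	else if (input_fd.revents != 0)
		printf("descriptor is readable\n");		/* got_data = true */
	return 0;
}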
/*
{
if (setitimer(ITIMER_REAL, &write_timeout, NULL))
ereport(ERROR,
- (errmsg("could not set statistics collector timer: %m")));
+ (errmsg("could not set statistics collector timer: %m")));
need_timer = false;
}
}
else
{
/*
- * We can only get here if the select/poll timeout elapsed.
- * Check for postmaster death.
+ * We can only get here if the select/poll timeout elapsed. Check
+ * for postmaster death.
*/
if (!PostmasterIsAlive(true))
break;
}
- } /* end of message-processing loop */
+ } /* end of message-processing loop */
/*
* Save the final stats to reuse at next startup.
while ((dbentry = (PgStat_StatDBEntry *) hash_seq_search(&hstat)) != NULL)
{
/*
- * Write out the DB entry including the number of live backends.
- * We don't write the tables pointer since it's of no use to any
- * other process.
+ * Write out the DB entry including the number of live backends. We
+ * don't write the tables pointer since it's of no use to any other
+ * process.
*/
fputc('D', fpout);
fwrite(dbentry, offsetof(PgStat_StatDBEntry, tables), 1, fpout);
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not write temporary statistics file \"%s\": %m",
- PGSTAT_STAT_TMPFILE)));
+ errmsg("could not write temporary statistics file \"%s\": %m",
+ PGSTAT_STAT_TMPFILE)));
fclose(fpout);
unlink(PGSTAT_STAT_TMPFILE);
}
if (tabentry == NULL)
return;
- if (msg->m_autovacuum)
+ if (msg->m_autovacuum)
tabentry->autovac_vacuum_timestamp = msg->m_vacuumtime;
- else
- tabentry->vacuum_timestamp = msg->m_vacuumtime;
+ else
+ tabentry->vacuum_timestamp = msg->m_vacuumtime;
tabentry->n_live_tuples = msg->m_tuples;
tabentry->n_dead_tuples = 0;
if (msg->m_analyze)
if (tabentry == NULL)
return;
- if (msg->m_autovacuum)
+ if (msg->m_autovacuum)
tabentry->autovac_analyze_timestamp = msg->m_analyzetime;
- else
+ else
tabentry->analyze_timestamp = msg->m_analyzetime;
tabentry->n_live_tuples = msg->m_live_tuples;
tabentry->n_dead_tuples = msg->m_dead_tuples;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.499 2006/08/15 18:26:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.500 2006/10/04 00:29:56 momjian Exp $
*
* NOTES
*
static Dllist *BackendList;
#ifdef EXEC_BACKEND
-/*
+/*
* Number of entries in the backend table. Twice the number of backends,
- * plus four other subprocesses (stats, bgwriter, autovac, logger).
+ * plus four other subprocesses (stats, bgwriter, autovac, logger).
*/
#define NUM_BACKENDARRAY_ELEMS (2*MaxBackends + 4)
static Backend *ShmemBackendArray;
break;
case 'T':
+
/*
* In the event that some backend dumps core, send SIGSTOP,
* rather than SIGQUIT, to all its peers. This lets the wily
break;
case 't':
- {
- const char *tmp = get_stats_option_name(optarg);
-
- if (tmp)
{
- SetConfigOption(tmp, "true", PGC_POSTMASTER, PGC_S_ARGV);
- }
- else
- {
- write_stderr("%s: invalid argument for option -t: \"%s\"\n",
- progname, optarg);
- ExitPostmaster(1);
+ const char *tmp = get_stats_option_name(optarg);
+
+ if (tmp)
+ {
+ SetConfigOption(tmp, "true", PGC_POSTMASTER, PGC_S_ARGV);
+ }
+ else
+ {
+ write_stderr("%s: invalid argument for option -t: \"%s\"\n",
+ progname, optarg);
+ ExitPostmaster(1);
+ }
+ break;
}
- break;
- }
case 'W':
SetConfigOption("post_auth_delay", optarg, PGC_POSTMASTER, PGC_S_ARGV);
* postmaster's listen sockets. (In EXEC_BACKEND case this is all
* done in SubPostmasterMain.)
*/
- IsUnderPostmaster = true; /* we are a postmaster subprocess now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess now */
- MyProcPid = getpid(); /* reset MyProcPid */
+ MyProcPid = getpid(); /* reset MyProcPid */
/* We don't want the postmaster's proc_exit() handlers */
on_exit_reset();
* title for ps. It's good to do this as early as possible in startup.
*/
init_ps_display(port->user_name, port->database_name, remote_ps_data,
- update_process_title ? "authentication" : "");
-
+ update_process_title ? "authentication" : "");
+
/*
* Now perform authentication exchange.
*/
/*
* Perform additional initialization and client authentication.
*
- * We want to do this before InitProcess() for a couple of reasons:
- * 1. so that we aren't eating up a PGPROC slot while waiting on the
- * client.
- * 2. so that if InitProcess() fails due to being out of PGPROC slots,
- * we have already initialized libpq and are able to report the error
- * to the client.
+ * We want to do this before InitProcess() for a couple of reasons: 1.
+ * so that we aren't eating up a PGPROC slot while waiting on the
+ * client. 2. so that if InitProcess() fails due to being out of
+ * PGPROC slots, we have already initialized libpq and are able to
+ * report the error to the client.
*/
BackendInitialize(&port);
InitProcess();
/*
- * Attach process to shared data structures. If testing
- * EXEC_BACKEND on Linux, you must run this as root
- * before starting the postmaster:
+ * Attach process to shared data structures. If testing EXEC_BACKEND
+ * on Linux, you must run this as root before starting the postmaster:
*
- * echo 0 >/proc/sys/kernel/randomize_va_space
+ * echo 0 >/proc/sys/kernel/randomize_va_space
*
- * This prevents a randomized stack base address that causes
- * child shared memory to be at a different address than
- * the parent, making it impossible to attached to shared
- * memory. Return the value to '1' when finished.
+ * This prevents a randomized stack base address that causes child
+ * shared memory to be at a different address than the parent, making
+ * it impossible to attach to shared memory. Return the value to

+ * '1' when finished.
*/
CreateSharedMemoryAndSemaphores(false, 0);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.114 2006/09/05 21:08:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.115 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void checkRuleResultList(List *targetList, TupleDesc resultDesc,
- bool isSelect);
+ bool isSelect);
static bool setRuleCheckAsUser_walker(Node *node, Oid *context);
static void setRuleCheckAsUser_Query(Query *qry, Oid userid);
*/
if (!replace && event_relation->rd_rules != NULL)
{
- int i;
+ int i;
for (i = 0; i < event_relation->rd_rules->numLocks; i++)
{
else
{
/*
- * For non-SELECT rules, a RETURNING list can appear in at most one
- * of the actions ... and there can't be any RETURNING list at all
- * in a conditional or non-INSTEAD rule. (Actually, there can be
- * at most one RETURNING list across all rules on the same event,
- * but it seems best to enforce that at rule expansion time.) If
- * there is a RETURNING list, it must match the event relation.
+ * For non-SELECT rules, a RETURNING list can appear in at most one of
+ * the actions ... and there can't be any RETURNING list at all in a
+ * conditional or non-INSTEAD rule. (Actually, there can be at most
+ * one RETURNING list across all rules on the same event, but it seems
+ * best to enforce that at rule expansion time.) If there is a
+ * RETURNING list, it must match the event relation.
*/
- bool haveReturning = false;
+ bool haveReturning = false;
foreach(l, action)
{
if (haveReturning)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot have multiple RETURNING lists in a rule")));
+ errmsg("cannot have multiple RETURNING lists in a rule")));
haveReturning = true;
if (event_qual != NULL)
ereport(ERROR,
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
isSelect ?
- errmsg("SELECT rule's target list has too many entries") :
+ errmsg("SELECT rule's target list has too many entries") :
errmsg("RETURNING list has too many entries")));
attr = resultDesc->attrs[i - 1];
/*
* Allow typmods to be different only if one of them is -1, ie,
- * "unspecified". This is necessary for cases like "numeric",
- * where the table will have a filled-in default length but the
- * select rule's expression will probably have typmod = -1.
+ * "unspecified". This is necessary for cases like "numeric", where
+ * the table will have a filled-in default length but the select
+ * rule's expression will probably have typmod = -1.
*/
tletypmod = exprTypmod((Node *) tle->expr);
if (attr->atttypmod != tletypmod &&
heap_freetuple(ruletup);
heap_close(pg_rewrite_desc, RowExclusiveLock);
}
+
#endif
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.166 2006/09/02 17:06:52 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.167 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool *returning_flag);
static List *adjustJoinTreeList(Query *parsetree, bool removert, int rt_index);
static void rewriteTargetList(Query *parsetree, Relation target_relation,
- List **attrno_list);
+ List **attrno_list);
static TargetEntry *process_matched_tle(TargetEntry *src_tle,
TargetEntry *prior_tle,
const char *attrName);
static Node *get_assignment_input(Node *node);
static void rewriteValuesRTE(RangeTblEntry *rte, Relation target_relation,
- List *attrnos);
+ List *attrnos);
static void markQueryForLocking(Query *qry, bool forUpdate, bool noWait,
bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
}
/*
- * If rule_action has a RETURNING clause, then either throw it away
- * if the triggering query has no RETURNING clause, or rewrite it to
- * emit what the triggering query's RETURNING clause asks for. Throw
- * an error if more than one rule has a RETURNING clause.
+ * If rule_action has a RETURNING clause, then either throw it away if the
+ * triggering query has no RETURNING clause, or rewrite it to emit what
+ * the triggering query's RETURNING clause asks for. Throw an error if
+ * more than one rule has a RETURNING clause.
*/
if (!parsetree->returningList)
rule_action->returningList = NIL;
if (*returning_flag)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot have RETURNING lists in multiple rules")));
+ errmsg("cannot have RETURNING lists in multiple rules")));
*returning_flag = true;
rule_action->returningList = (List *)
ResolveNew((Node *) parsetree->returningList,
foreach(lc, rte->values_lists)
{
- List *sublist = (List *) lfirst(lc);
- ListCell *lc2;
+ List *sublist = (List *) lfirst(lc);
+ ListCell *lc2;
foreach(lc2, sublist)
{
- Node *col = (Node *) lfirst(lc2);
+ Node *col = (Node *) lfirst(lc2);
if (IsA(col, SetToDefault))
return true;
newValues = NIL;
foreach(lc, rte->values_lists)
{
- List *sublist = (List *) lfirst(lc);
- List *newList = NIL;
- ListCell *lc2;
- ListCell *lc3;
+ List *sublist = (List *) lfirst(lc);
+ List *newList = NIL;
+ ListCell *lc2;
+ ListCell *lc3;
forboth(lc2, sublist, lc3, attrnos)
{
- Node *col = (Node *) lfirst(lc2);
- int attrno = lfirst_int(lc3);
+ Node *col = (Node *) lfirst(lc2);
+ int attrno = lfirst_int(lc3);
if (IsA(col, SetToDefault))
{
if (!att_tup->attisdropped)
new_expr = build_column_default(target_relation, attrno);
else
- new_expr = NULL; /* force a NULL if dropped */
+ new_expr = NULL; /* force a NULL if dropped */
/*
* If there is no default (ie, default is effectively NULL),
RangeTblEntry *values_rte = NULL;
/*
- * If it's an INSERT ... VALUES (...), (...), ...
- * there will be a single RTE for the VALUES targetlists.
+ * If it's an INSERT ... VALUES (...), (...), ... there will be a
+ * single RTE for the VALUES targetlists.
*/
if (list_length(parsetree->jointree->fromlist) == 1)
{
if (values_rte)
{
- List *attrnos;
+ List *attrnos;
/* Process the main targetlist ... */
rewriteTargetList(parsetree, rt_entry_relation, &attrnos);
}
/*
- * If there is an INSTEAD, and the original query has a RETURNING,
- * we have to have found a RETURNING in the rule(s), else fail.
- * (Because DefineQueryRewrite only allows RETURNING in unconditional
- * INSTEAD rules, there's no need to worry whether the substituted
- * RETURNING will actually be executed --- it must be.)
+ * If there is an INSTEAD, and the original query has a RETURNING, we
+ * have to have found a RETURNING in the rule(s), else fail. (Because
+ * DefineQueryRewrite only allows RETURNING in unconditional INSTEAD
+ * rules, there's no need to worry whether the substituted RETURNING
+ * will actually be executed --- it must be.)
*/
if ((instead || qual_product != NULL) &&
parsetree->returningList &&
case CMD_INSERT:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot INSERT RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot INSERT RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON INSERT DO INSTEAD rule with a RETURNING clause.")));
break;
case CMD_UPDATE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot UPDATE RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot UPDATE RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON UPDATE DO INSTEAD rule with a RETURNING clause.")));
break;
case CMD_DELETE:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot DELETE RETURNING on relation \"%s\"",
- RelationGetRelationName(rt_entry_relation)),
+ errmsg("cannot DELETE RETURNING on relation \"%s\"",
+ RelationGetRelationName(rt_entry_relation)),
errhint("You need an unconditional ON DELETE DO INSTEAD rule with a RETURNING clause.")));
break;
default:
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.101 2006/07/14 14:52:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.102 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
if (IsA(result, Query))
((Query *) result)->hasSubLinks = true;
+
/*
* Note: if we're called on a non-Query node then it's the caller's
- * responsibility to update hasSubLinks in the ancestor Query.
- * This is pretty fragile and perhaps should be rethought ...
+ * responsibility to update hasSubLinks in the ancestor Query. This is
+ * pretty fragile and perhaps should be rethought ...
*/
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.67 2006/10/03 21:21:36 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteRemove.c,v 1.68 2006/10/04 00:29:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
void
RemoveRewriteRule(Oid owningRel, const char *ruleName, DropBehavior behavior,
- bool missing_ok)
+ bool missing_ok)
{
HeapTuple tuple;
Oid eventRelationOid;
*/
if (!HeapTupleIsValid(tuple))
{
- if (! missing_ok)
+ if (!missing_ok)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("rule \"%s\" for relation \"%s\" does not exist",
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.211 2006/09/25 22:01:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.212 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* This can happen because mdread doesn't complain about reads beyond
* EOF --- which is arguably bogus, but changing it seems tricky ---
* and so a previous attempt to read a block just beyond EOF could
- * have left a "valid" zero-filled buffer. Unfortunately, we have
+ * have left a "valid" zero-filled buffer. Unfortunately, we have
* also seen this case occurring because of buggy Linux kernels that
* sometimes return an lseek(SEEK_END) result that doesn't account for
- * a recent write. In that situation, the pre-existing buffer would
+ * a recent write. In that situation, the pre-existing buffer would
* contain valid data that we don't want to overwrite. Since the
* legitimate cases should always have left a zero-filled buffer,
* complain if not PageIsNew.
errhint("This has been seen to occur with buggy kernels; consider updating your system.")));
/*
- * We *must* do smgrextend before succeeding, else the
- * page will not be reserved by the kernel, and the next P_NEW call
- * will decide to return the same page. Clear the BM_VALID bit,
- * do the StartBufferIO call that BufferAlloc didn't, and proceed.
+ * We *must* do smgrextend before succeeding, else the page will not
+ * be reserved by the kernel, and the next P_NEW call will decide to
+ * return the same page. Clear the BM_VALID bit, do the StartBufferIO
+ * call that BufferAlloc didn't, and proceed.
*/
if (isLocalBuf)
{
else
{
/*
- * Loop to handle the very small possibility that someone
- * re-sets BM_VALID between our clearing it and StartBufferIO
- * inspecting it.
+ * Loop to handle the very small possibility that someone re-sets
+ * BM_VALID between our clearing it and StartBufferIO inspecting
+ * it.
*/
- do {
+ do
+ {
LockBufHdr(bufHdr);
Assert(bufHdr->flags & BM_VALID);
bufHdr->flags &= ~BM_VALID;
{
BufferTag newTag; /* identity of requested block */
uint32 newHash; /* hash value for newTag */
- LWLockId newPartitionLock; /* buffer partition lock for it */
+ LWLockId newPartitionLock; /* buffer partition lock for it */
BufferTag oldTag; /* previous identity of selected buffer */
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
int buf_id;
volatile BufferDesc *buf;
{
BufferTag oldTag;
uint32 oldHash; /* hash value for oldTag */
- LWLockId oldPartitionLock; /* buffer partition lock for it */
+ LWLockId oldPartitionLock; /* buffer partition lock for it */
BufFlags oldFlags;
/* Save the original buffer tag before dropping the spinlock */
UnlockBufHdr(buf);
/*
- * Need to compute the old tag's hashcode and partition lock ID.
- * XXX is it worth storing the hashcode in BufferDesc so we need
- * not recompute it here? Probably not.
+ * Need to compute the old tag's hashcode and partition lock ID. XXX is it
+ * worth storing the hashcode in BufferDesc so we need not recompute it
+ * here? Probably not.
*/
oldHash = BufTableHashCode(&oldTag);
oldPartitionLock = BufMappingPartitionLock(oldHash);
*
* Marks buffer contents as dirty (actual write happens later).
*
- * Buffer must be pinned and exclusive-locked. (If caller does not hold
+ * Buffer must be pinned and exclusive-locked. (If caller does not hold
* exclusive lock, then somebody could be in process of writing the buffer,
* leading to risk of bad data written to disk.)
*/
{
/*
* If in bgwriter, absorb pending fsync requests after each
- * WRITES_PER_ABSORB write operations, to prevent overflow of
- * the fsync request queue. If not in bgwriter process, this is
- * a no-op.
+ * WRITES_PER_ABSORB write operations, to prevent overflow of the
+ * fsync request queue. If not in bgwriter process, this is a
+ * no-op.
*/
if (--absorb_counter <= 0)
{
/*
* This routine might get called many times on the same page, if we are
* making the first scan after commit of an xact that added/deleted many
- * tuples. So, be as quick as we can if the buffer is already dirty. We
+ * tuples. So, be as quick as we can if the buffer is already dirty. We
* do this by not acquiring spinlock if it looks like the status bits are
- * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
+ * already OK. (Note it is okay if someone else clears BM_JUST_DIRTIED
* immediately after we look, because the buffer content update is already
* done and will be reflected in the I/O.)
*/
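The fast path described above peeks at the status flags without taking the spinlock and only falls back to locking when the bits do not already look right. A stand-alone sketch, with the flag names borrowed from the comment and the spinlock replaced by a counter so the effect is visible:

#include <stdio.h>

#define BM_DIRTY		(1 << 0)
#define BM_JUST_DIRTIED	(1 << 1)

static int spinlock_taken = 0;

static void
mark_dirty(unsigned *flags)
{
	/* unlocked peek: if both bits already appear set, nothing to do */
	if ((*flags & (BM_DIRTY | BM_JUST_DIRTIED)) == (BM_DIRTY | BM_JUST_DIRTIED))
		return;

	spinlock_taken++;			/* LockBufHdr() would go here */
	*flags |= BM_DIRTY | BM_JUST_DIRTIED;
	/* UnlockBufHdr() would go here */
}

int
main(void)
{
	unsigned	flags = 0;

	mark_dirty(&flags);			/* slow path: takes the lock once */
	mark_dirty(&flags);			/* fast path: no lock needed */
	printf("spinlock taken %d time(s)\n", spinlock_taken);
	return 0;
}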
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.56 2006/07/23 18:34:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.57 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Initialize the shared buffer lookup hashtable.
*
- * Since we can't tolerate running out of lookup table entries, we
- * must be sure to specify an adequate table size here. The maximum
- * steady-state usage is of course NBuffers entries, but BufferAlloc()
- * tries to insert a new entry before deleting the old. In principle
- * this could be happening in each partition concurrently, so we
- * could need as many as NBuffers + NUM_BUFFER_PARTITIONS entries.
+ * Since we can't tolerate running out of lookup table entries, we must be
+ * sure to specify an adequate table size here. The maximum steady-state
+ * usage is of course NBuffers entries, but BufferAlloc() tries to insert
+ * a new entry before deleting the old. In principle this could be
+ * happening in each partition concurrently, so we could need as many as
+ * NBuffers + NUM_BUFFER_PARTITIONS entries.
*/
InitBufTable(NBuffers + NUM_BUFFER_PARTITIONS);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.129 2006/08/24 03:15:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.130 2006/10/04 00:29:57 momjian Exp $
*
* NOTES:
*
* in which case immediate retry is indicated.
*/
#ifdef WIN32
- DWORD error = GetLastError();
+ DWORD error = GetLastError();
switch (error)
{
* See comments in FileRead()
*/
#ifdef WIN32
- DWORD error = GetLastError();
+ DWORD error = GetLastError();
switch (error)
{
}
/*
- * TEMPORARY hack to log the Windows error code on fopen failures,
- * in hopes of diagnosing some hard-to-reproduce problems.
+ * TEMPORARY hack to log the Windows error code on fopen failures, in
+ * hopes of diagnosing some hard-to-reproduce problems.
*/
#ifdef WIN32
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.55 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.56 2006/10/04 00:29:57 momjian Exp $
*
*
* NOTES:
RelFileNode key; /* hash key (must be first) */
bool isIndex; /* if true, we store only page numbers */
uint32 avgRequest; /* moving average of space requests */
- BlockNumber interestingPages; /* # of pages with useful free space */
+ BlockNumber interestingPages; /* # of pages with useful free space */
int32 storedPages; /* # of pages stored in arena */
} FsmCacheRelHeader;
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
static FSMRelation *create_fsm_rel(RelFileNode *rel);
static void delete_fsm_rel(FSMRelation *fsmrel);
-static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
- bool isIndex);
+static int realloc_fsm_rel(FSMRelation *fsmrel, BlockNumber interestingPages,
+ bool isIndex);
static void link_fsm_rel_usage(FSMRelation *fsmrel);
static void unlink_fsm_rel_usage(FSMRelation *fsmrel);
static void link_fsm_rel_storage(FSMRelation *fsmrel);
double needed;
LWLockAcquire(FreeSpaceLock, LW_EXCLUSIVE);
+
/*
* Count total space actually used, as well as the unclamped request total
*/
}
/*
- * We clamp the per-relation requests to at most half the arena size;
- * this is intended to prevent a single bloated relation from crowding
- * out FSM service for every other rel.
+ * We clamp the per-relation requests to at most half the arena size; this
+ * is intended to prevent a single bloated relation from crowding out FSM
+ * service for every other rel.
*/
req = Min(req, FreeSpaceMap->totalChunks / 2);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.87 2006/08/01 19:03:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.88 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
size = add_size(size, 8192 - (size % 8192));
/*
- * The shared memory for add-ins is treated as a separate
- * segment, but in reality it is not.
+ * The shared memory for add-ins is treated as a separate segment, but
+ * in reality it is not.
*/
size_b4addins = size;
size = add_size(size, AddinShmemSize());
* Modify hdr to show segment size before add-ins
*/
seghdr->totalsize = size_b4addins;
-
- /*
+
+ /*
* Set up segment header sections in each Addin context
*/
InitAddinContexts((void *) ((char *) seghdr + size_b4addins));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.17 2006/09/03 15:59:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/procarray.c,v 1.18 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* This is used by VACUUM to decide which deleted tuples must be preserved
* in a table. allDbs = TRUE is needed for shared relations, but allDbs =
* FALSE is sufficient for non-shared relations, since only backends in my
- * own database could ever see the tuples in them. Also, we can ignore
+ * own database could ever see the tuples in them. Also, we can ignore
* concurrently running lazy VACUUMs because (a) they must be working on other
* tables, and (b) they don't need to do snapshot-based lookups.
*
globalxmin = xmin = GetTopTransactionId();
/*
- * It is sufficient to get shared lock on ProcArrayLock, even if we
- * are computing a serializable snapshot and therefore will be setting
+ * It is sufficient to get shared lock on ProcArrayLock, even if we are
+ * computing a serializable snapshot and therefore will be setting
* MyProc->xmin. This is because any two backends that have overlapping
* shared holds on ProcArrayLock will certainly compute the same xmin
* (since no xact, in particular not the oldest, can exit the set of
* running transactions while we hold ProcArrayLock --- see further
- * discussion just below). So it doesn't matter whether another backend
+ * discussion just below). So it doesn't matter whether another backend
* concurrently doing GetSnapshotData or GetOldestXmin sees our xmin as
* set or not; he'd compute the same xmin for himself either way.
*/
/*
* Ignore my own proc (dealt with my xid above), procs not running a
- * transaction, xacts started since we read the next transaction
- * ID, and xacts executing LAZY VACUUM. There's no need to store XIDs
+ * transaction, xacts started since we read the next transaction ID,
+ * and xacts executing LAZY VACUUM. There's no need to store XIDs
* above what we got from ReadNewTransactionId, since we'll treat them
* as running anyway. We also assume that such xacts can't compute an
* xmin older than ours, so they needn't be considered in computing
* their parent, so no need to check them against xmin.
*
* The other backend can add more subxids concurrently, but cannot
- * remove any. Hence it's important to fetch nxids just once.
- * Should be safe to use memcpy, though. (We needn't worry about
- * missing any xids added concurrently, because they must postdate
- * xmax.)
+ * remove any. Hence it's important to fetch nxids just once. Should
+ * be safe to use memcpy, though. (We needn't worry about missing any
+ * xids added concurrently, because they must postdate xmax.)
*/
if (subcount >= 0)
{
if (proc->subxids.overflowed)
- subcount = -1; /* overflowed */
+ subcount = -1; /* overflowed */
else
{
- int nxids = proc->subxids.nxids;
+ int nxids = proc->subxids.nxids;
if (nxids > 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.96 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.97 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* hash bucket garbage collector if need be. Right now, it seems
* unnecessary.
*
- * (e) Add-ins can request their own logical shared memory segments
- * by calling RegisterAddinContext() from the preload-libraries hook.
- * Each call establishes a uniquely named add-in shared memopry
- * context which will be set up as part of postgres intialisation.
- * Memory can be allocated from these contexts using
- * ShmemAllocFromContext(), and can be reset to its initial condition
- * using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
- * can be used to request that a LWLock be allocated, placed into *lock_ptr.
+ * (e) Add-ins can request their own logical shared memory segments
+ * by calling RegisterAddinContext() from the preload-libraries hook.
+ * Each call establishes a uniquely named add-in shared memory
+ * context which will be set up as part of postgres initialisation.
+ * Memory can be allocated from these contexts using
+ * ShmemAllocFromContext(), and can be reset to its initial condition
+ * using ShmemResetContext(). Also, RegisterAddinLWLock(LWLockid *lock_ptr)
+ * can be used to request that a LWLock be allocated, placed into *lock_ptr.
*/
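The add-in mechanism described above is a register-first, size-once, allocate-later scheme. This stand-alone analogy (not the server API; Region, register_region(), init_regions() and alloc_from() are invented) shows the same shape: regions are registered by name before the single backing block is sized and carved up, and later allocations are served out of the named region:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct Region
{
	const char *name;
	size_t		size;
	size_t		used;
	char	   *base;			/* filled in once the backing block exists */
	struct Region *next;
} Region;

static Region *regions = NULL;

static void
register_region(const char *name, size_t size)
{
	Region	   *r = malloc(sizeof(Region));

	r->name = name;
	r->size = size;
	r->used = 0;
	r->base = NULL;
	r->next = regions;
	regions = r;
}

static void
init_regions(void)
{
	size_t		total = 0;
	Region	   *r;
	char	   *block;

	for (r = regions; r; r = r->next)
		total += r->size;
	block = malloc(total);		/* one backing segment for all regions */
	for (r = regions; r; r = r->next)
	{
		r->base = block;
		block += r->size;
	}
}

static void *
alloc_from(const char *name, size_t size)
{
	Region	   *r;

	for (r = regions; r; r = r->next)
		if (strcmp(r->name, name) == 0 && r->used + size <= r->size)
		{
			void	   *p = r->base + r->used;

			r->used += size;
			return p;
		}
	return NULL;
}

int
main(void)
{
	register_region("my_addin", 1024);	/* would happen in a preload hook */
	init_regions();						/* would happen at startup */
	printf("%p\n", alloc_from("my_addin", 128));
	return 0;
}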
#include "postgres.h"
/* Structures and globals for managing add-in shared memory contexts */
typedef struct context
{
- char *name;
- Size size;
- PGShmemHeader *seg_hdr;
+ char *name;
+ Size size;
+ PGShmemHeader *seg_hdr;
struct context *next;
} ContextNode;
Assert(shmhdr != NULL);
/*
- * Initialize the spinlock used by ShmemAlloc. We have to do the
- * space allocation the hard way, since obviously ShmemAlloc can't
- * be called yet.
+ * Initialize the spinlock used by ShmemAlloc. We have to do the space
+ * allocation the hard way, since obviously ShmemAlloc can't be called
+ * yet.
*/
ShmemLock = (slock_t *) (((char *) shmhdr) + shmhdr->freeoffset);
shmhdr->freeoffset += MAXALIGN(sizeof(slock_t));
ShmemIndex = (HTAB *) NULL;
/*
- * Initialize ShmemVariableCache for transaction manager.
- * (This doesn't really belong here, but not worth moving.)
+ * Initialize ShmemVariableCache for transaction manager. (This doesn't
+ * really belong here, but not worth moving.)
*/
ShmemVariableCache = (VariableCache)
- ShmemAlloc(sizeof(*ShmemVariableCache));
+ ShmemAlloc(sizeof(*ShmemVariableCache));
memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
/*
* RegisterAddinContext -- Register the requirement for a named shared
- * memory context.
+ * memory context.
*/
void
RegisterAddinContext(const char *name, Size size)
{
- char *newstr = malloc(strlen(name) + 1);
+ char *newstr = malloc(strlen(name) + 1);
ContextNode *node = malloc(sizeof(ContextNode));
strcpy(newstr, name);
/*
* ContextFromName -- Return the ContextNode for the given named
- * context, or NULL if not found.
+ * context, or NULL if not found.
*/
static ContextNode *
ContextFromName(const char *name)
/*
* InitAddinContexts -- Initialise the registered addin shared memory
- * contexts.
+ * contexts.
*/
void
InitAddinContexts(void *start)
next_segment->totalsize = context->size;
next_segment->freeoffset = MAXALIGN(sizeof(PGShmemHeader));
- next_segment = (PGShmemHeader *)
+ next_segment = (PGShmemHeader *)
((char *) next_segment + context->size);
context = context->next;
}
/*
* AddinShmemSize -- Report how much shared memory has been registered
- * for add-ins.
+ * for add-ins.
*/
Size
AddinShmemSize(void)
void *
ShmemAllocFromContext(Size size, const char *context_name)
{
- Size newStart;
- Size newFree;
- void *newSpace;
- ContextNode *context;
+ Size newStart;
+ Size newFree;
+ void *newSpace;
+ ContextNode *context;
/* use volatile pointer to prevent code rearrangement */
volatile PGShmemHeader *shmemseghdr = ShmemSegHdr;
- /*
+ /*
* if context_name is provided, allocate from the named context
*/
if (context_name)
* be trying to init the shmem index itself.
*
* Notice that the ShmemIndexLock is released before the shmem
- * index has been initialized. This should be OK because no
- * other process can be accessing shared memory yet.
+ * index has been initialized. This should be OK because no other
+ * process can be accessing shared memory yet.
*/
Assert(shmemseghdr->indexoffset == 0);
structPtr = ShmemAlloc(size);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.88 2006/09/22 23:20:13 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.89 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static inline void
SetLocktagRelationOid(LOCKTAG *tag, Oid relid)
{
- Oid dbid;
+ Oid dbid;
if (IsSharedRelation(relid))
dbid = InvalidOid;
/*
* LockRelationOid
*
- * Lock a relation given only its OID. This should generally be used
+ * Lock a relation given only its OID. This should generally be used
* before attempting to open the relation's relcache entry.
*/
void
res = LockAcquire(&tag, lockmode, false, false);
/*
- * Now that we have the lock, check for invalidation messages, so that
- * we will update or flush any stale relcache entry before we try to use
- * it. We can skip this in the not-uncommon case that we already had
- * the same type of lock being requested, since then no one else could
- * have modified the relcache entry in an undesirable way. (In the
- * case where our own xact modifies the rel, the relcache update happens
- * via CommandCounterIncrement, not here.)
+ * Now that we have the lock, check for invalidation messages, so that we
+ * will update or flush any stale relcache entry before we try to use it.
+ * We can skip this in the not-uncommon case that we already had the same
+ * type of lock being requested, since then no one else could have
+ * modified the relcache entry in an undesirable way. (In the case where
+ * our own xact modifies the rel, the relcache update happens via
+ * CommandCounterIncrement, not here.)
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
return false;
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
res = LockAcquire(&tag, lockmode, false, false);
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
return false;
/*
- * Now that we have the lock, check for invalidation messages; see
- * notes in LockRelationOid.
+ * Now that we have the lock, check for invalidation messages; see notes
+ * in LockRelationOid.
*/
if (res != LOCKACQUIRE_ALREADY_HELD)
AcceptInvalidationMessages();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.173 2006/09/18 22:40:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.174 2006/10/04 00:29:57 momjian Exp $
*
* NOTES
* A lock table is a shared memory hash table. When
};
#ifndef LOCK_DEBUG
-static bool Dummy_trace = false;
+static bool Dummy_trace = false;
#endif
static const LockMethodData default_lockmethod = {
init_table_size = max_table_size / 2;
/*
- * Allocate hash table for LOCK structs. This stores
- * per-locked-object information.
+ * Allocate hash table for LOCK structs. This stores per-locked-object
+ * information.
*/
MemSet(&info, 0, sizeof(info));
info.keysize = sizeof(LOCKTAG);
elog(FATAL, "could not initialize proclock hash table");
/*
- * Allocate non-shared hash table for LOCALLOCK structs. This stores
- * lock counts and resource owner information.
+ * Allocate non-shared hash table for LOCALLOCK structs. This stores lock
+ * counts and resource owner information.
*
* The non-shared table could already exist in this process (this occurs
* when the postmaster is recreating shared memory after a backend crash).
proclock_hash(const void *key, Size keysize)
{
const PROCLOCKTAG *proclocktag = (const PROCLOCKTAG *) key;
- uint32 lockhash;
- Datum procptr;
+ uint32 lockhash;
+ Datum procptr;
Assert(keysize == sizeof(PROCLOCKTAG));
/*
* To make the hash code also depend on the PGPROC, we xor the proc
* struct's address into the hash code, left-shifted so that the
- * partition-number bits don't change. Since this is only a hash,
- * we don't care if we lose high-order bits of the address; use
- * an intermediate variable to suppress cast-pointer-to-int warnings.
+ * partition-number bits don't change. Since this is only a hash, we
+ * don't care if we lose high-order bits of the address; use an
+ * intermediate variable to suppress cast-pointer-to-int warnings.
*/
procptr = PointerGetDatum(proclocktag->myProc);
lockhash ^= ((uint32) procptr) << LOG2_NUM_LOCK_PARTITIONS;
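
The comment above describes folding the PGPROC pointer into an existing lock hash without disturbing the partition-selector bits. A minimal sketch of that idea, where LOG2_NUM_PARTITIONS and the function name are illustrative assumptions rather than the backend's definitions:

#include <stdint.h>

#define LOG2_NUM_PARTITIONS 4		/* illustrative: 16 partitions */

/*
 * XOR the (truncated) pointer in above the partition-selector bits, so the
 * low-order bits of the lock hash -- and hence the partition number -- are
 * left unchanged.  Losing high-order pointer bits is fine: it is only a hash.
 */
static uint32_t
combine_hash_with_pointer(uint32_t lockhash, const void *proc)
{
	uintptr_t	procptr = (uintptr_t) proc;

	return lockhash ^ ((uint32_t) procptr << LOG2_NUM_PARTITIONS);
}
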
static inline uint32
ProcLockHashCode(const PROCLOCKTAG *proclocktag, uint32 hashcode)
{
- uint32 lockhash = hashcode;
- Datum procptr;
+ uint32 lockhash = hashcode;
+ Datum procptr;
/*
* This must match proclock_hash()!
memcpy(new_status, old_status, len);
strcpy(new_status + len, " waiting");
set_ps_display(new_status, false);
- new_status[len] = '\0'; /* truncate off " waiting" */
+ new_status[len] = '\0'; /* truncate off " waiting" */
}
pgstat_report_waiting(true);
LockTagHashCode(&lock->tag),
wakeupNeeded);
- next_item:
+ next_item:
proclock = nextplock;
- } /* loop over PROCLOCKs within this partition */
+ } /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
- } /* loop over partitions */
+ } /* loop over partitions */
#ifdef LOCK_DEBUG
if (*(lockMethodTable->trace_flag))
if (!lock)
{
/*
- * If the lock object doesn't exist, there is nothing holding a
- * lock on this lockable object.
+ * If the lock object doesn't exist, there is nothing holding a lock
+ * on this lockable object.
*/
LWLockRelease(partitionLock);
return NIL;
{
if (conflictMask & proclock->holdMask)
{
- PGPROC *proc = proclock->tag.myProc;
+ PGPROC *proc = proclock->tag.myProc;
/* A backend never blocks itself */
if (proc != MyProc)
/*
* We cannot simply modify proclock->tag.myProc to reassign
* ownership of the lock, because that's part of the hash key and
- * the proclock would then be in the wrong hash chain. So, unlink
+ * the proclock would then be in the wrong hash chain. So, unlink
* and delete the old proclock; create a new one with the right
* contents; and link it into place. We do it in this order to be
* certain we won't run out of shared memory (the way dynahash.c
(void *) &proclocktag,
HASH_ENTER_NULL, &found);
if (!newproclock)
- ereport(PANIC, /* should not happen */
+ ereport(PANIC, /* should not happen */
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
errdetail("Not enough memory for reassigning the prepared transaction's locks.")));
Assert((newproclock->holdMask & holdMask) == 0);
newproclock->holdMask |= holdMask;
- next_item:
+ next_item:
proclock = nextplock;
- } /* loop over PROCLOCKs within this partition */
+ } /* loop over PROCLOCKs within this partition */
LWLockRelease(partitionLock);
- } /* loop over partitions */
+ } /* loop over partitions */
END_CRIT_SECTION();
}
* operate one partition at a time if we want to deliver a self-consistent
* view of the state.
*
- * Since this is a read-only operation, we take shared instead of exclusive
- * lock. There's not a whole lot of point to this, because all the normal
- * operations require exclusive lock, but it doesn't hurt anything either.
- * It will at least allow two backends to do GetLockStatusData in parallel.
+ * Since this is a read-only operation, we take shared instead of
+ * exclusive lock. There's not a whole lot of point to this, because all
+ * the normal operations require exclusive lock, but it doesn't hurt
+ * anything either. It will at least allow two backends to do
+ * GetLockStatusData in parallel.
*
* Must grab LWLocks in partition-number order to avoid LWLock deadlock.
*/
}
/* And release locks */
- for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
+ for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
Assert(el == data->nelements);
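
Both this hunk and the CheckDeadLock one further down rely on the same convention: acquire all lock partition locks in ascending index order (a fixed order prevents LWLock deadlock) and release them back-to-front. A minimal sketch under the assumption that plain pthread mutexes stand in for LWLocks:

#include <pthread.h>

#define NUM_PARTITIONS 16

static pthread_mutex_t partition_lock[NUM_PARTITIONS];	/* init with pthread_mutex_init() before use */

/* Always acquire in ascending partition order; a fixed order prevents deadlock. */
static void
lock_all_partitions(void)
{
	int			i;

	for (i = 0; i < NUM_PARTITIONS; i++)
		pthread_mutex_lock(&partition_lock[i]);
}

/* Release order is not critical; back-to-front mirrors the loop in the patch. */
static void
unlock_all_partitions(void)
{
	int			i;

	for (i = NUM_PARTITIONS; --i >= 0;)
		pthread_mutex_unlock(&partition_lock[i]);
}
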
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.45 2006/08/07 21:56:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.46 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "storage/spin.h"
-static int NumAddinLWLocks(void);
+static int NumAddinLWLocks(void);
static void AssignAddinLWLocks(void);
static int *block_counts;
#endif
-/*
+/*
* Structures and globals to allow add-ins to register for their own
* lwlocks from the preload-libraries hook.
*/
typedef struct LWLockNode
{
- LWLockId *lock;
+ LWLockId *lock;
struct LWLockNode *next;
} LWLockNode;
static LWLockNode *addin_locks = NULL;
-static int num_addin_locks = 0;
+static int num_addin_locks = 0;
/*
 * RegisterAddinLWLock() --- Allow an add-in to request a LWLock
- * from the preload-libraries hook.
+ * from the preload-libraries hook.
*/
void
RegisterAddinLWLock(LWLockId *lock)
LWLockRelease(0);
}
-
-#endif /* LWLOCK_STATS */
+#endif /* LWLOCK_STATS */
/*
LWLockCounter[0] = (int) NumFixedLWLocks;
LWLockCounter[1] = numLocks;
- /*
- * Allocate LWLocks for those add-ins that have explicitly requested
- * them.
+ /*
+ * Allocate LWLocks for those add-ins that have explicitly requested them.
*/
AssignAddinLWLocks();
}
/* Set up local count state first time through in a given process */
if (counts_for_pid != MyProcPid)
{
- int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
- int numLocks = LWLockCounter[1];
+ int *LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+ int numLocks = LWLockCounter[1];
sh_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts = calloc(numLocks, sizeof(int));
ex_acquire_counts[lockid]++;
else
sh_acquire_counts[lockid]++;
-#endif /* LWLOCK_STATS */
+#endif /* LWLOCK_STATS */
/*
* We can't wait if we haven't got a PGPROC. This should only occur
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.179 2006/07/30 02:07:18 alvherre Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.180 2006/10/04 00:29:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(!found);
/*
- * Create the PGPROC structures for dummy (bgwriter) processes, too.
- * These do not get linked into the freeProcs list.
+ * Create the PGPROC structures for dummy (bgwriter) processes, too. These
+ * do not get linked into the freeProcs list.
*/
DummyProcs = (PGPROC *)
ShmemInitStruct("DummyProcs", NUM_DUMMY_PROCS * sizeof(PGPROC),
MemSet(DummyProcs, 0, NUM_DUMMY_PROCS * sizeof(PGPROC));
for (i = 0; i < NUM_DUMMY_PROCS; i++)
{
- DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
+ DummyProcs[i].pid = 0; /* marks dummy proc as not in use */
PGSemaphoreCreate(&(DummyProcs[i].sem));
}
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
Assert(MyProc != NULL);
/*
- * We should now know what database we're in, so advertise that. (We
- * need not do any locking here, since no other backend can yet see
- * our PGPROC.)
+ * We should now know what database we're in, so advertise that. (We need
+ * not do any locking here, since no other backend can yet see our
+ * PGPROC.)
*/
Assert(OidIsValid(MyDatabaseId));
MyProc->databaseId = MyDatabaseId;
/*
* We might be reusing a semaphore that belonged to a failed process. So
- * be careful and reinitialize its value here. (This is not strictly
+ * be careful and reinitialize its value here. (This is not strictly
* necessary anymore, but seems like a good idea for cleanliness.)
*/
PGSemaphoreReset(&MyProc->sem);
/*
* We used to do PGSemaphoreReset() here to ensure that our proc's wait
- * semaphore is reset to zero. This prevented a leftover wakeup signal
- * from remaining in the semaphore if someone else had granted us the
- * lock we wanted before we were able to remove ourselves from the
- * wait-list. However, now that ProcSleep loops until waitStatus changes,
- * a leftover wakeup signal isn't harmful, and it seems not worth
- * expending cycles to get rid of a signal that most likely isn't there.
+ * semaphore is reset to zero. This prevented a leftover wakeup signal
+ * from remaining in the semaphore if someone else had granted us the lock
+ * we wanted before we were able to remove ourselves from the wait-list.
+ * However, now that ProcSleep loops until waitStatus changes, a leftover
+ * wakeup signal isn't harmful, and it seems not worth expending cycles to
+ * get rid of a signal that most likely isn't there.
*/
/*
/*
* If someone wakes us between LWLockRelease and PGSemaphoreLock,
* PGSemaphoreLock will not block. The wakeup is "saved" by the semaphore
- * implementation. While this is normally good, there are cases where
- * a saved wakeup might be leftover from a previous operation (for
- * example, we aborted ProcWaitForSignal just before someone did
- * ProcSendSignal). So, loop to wait again if the waitStatus shows
- * we haven't been granted nor denied the lock yet.
+ * implementation. While this is normally good, there are cases where a
+ * saved wakeup might be leftover from a previous operation (for example,
+ * we aborted ProcWaitForSignal just before someone did ProcSendSignal).
+ * So, loop to wait again if the waitStatus shows we haven't been granted
+ * nor denied the lock yet.
*
* We pass interruptOK = true, which eliminates a window in which
* cancel/die interrupts would be held off undesirably. This is a promise
* updating the locallock table, but if we lose control to an error,
* LockWaitCancel will fix that up.
*/
- do {
+ do
+ {
PGSemaphoreLock(&MyProc->sem, true);
} while (MyProc->waitStatus == STATUS_WAITING);
elog(FATAL, "could not disable timer for process wakeup");
/*
- * Re-acquire the lock table's partition lock. We have to do this to
- * hold off cancel/die interrupts before we can mess with lockAwaited
- * (else we might have a missed or duplicated locallock update).
+ * Re-acquire the lock table's partition lock. We have to do this to hold
+ * off cancel/die interrupts before we can mess with lockAwaited (else we
+ * might have a missed or duplicated locallock update).
*/
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
int i;
/*
- * Acquire exclusive lock on the entire shared lock data structures.
- * Must grab LWLocks in partition-number order to avoid LWLock deadlock.
+ * Acquire exclusive lock on the entire shared lock data structures. Must
+ * grab LWLocks in partition-number order to avoid LWLock deadlock.
*
* Note that the deadlock check interrupt had better not be enabled
* anywhere that this process itself holds lock partition locks, else this
/*
* Oops. We have a deadlock.
*
- * Get this process out of wait state. (Note: we could do this more
+ * Get this process out of wait state. (Note: we could do this more
* efficiently by relying on lockAwaited, but use this coding to preserve
* the flexibility to kill some other transaction than the one detecting
* the deadlock.)
*/
/*
- * Release locks acquired at head of routine. Order is not critical,
- * so do it back-to-front to avoid waking another CheckDeadLock instance
+ * Release locks acquired at head of routine. Order is not critical, so
+ * do it back-to-front to avoid waking another CheckDeadLock instance
* before it can get all the locks.
*/
check_done:
- for (i = NUM_LOCK_PARTITIONS; --i >= 0; )
+ for (i = NUM_LOCK_PARTITIONS; --i >= 0;)
LWLockRelease(FirstLockMgrLock + i);
}
* This can share the semaphore normally used for waiting for locks,
* since a backend could never be waiting for a lock and a signal at
* the same time. As with locks, it's OK if the signal arrives just
- * before we actually reach the waiting state. Also as with locks,
+ * before we actually reach the waiting state. Also as with locks,
* it's necessary that the caller be robust against bogus wakeups:
* always check that the desired state has occurred, and wait again
- * if not. This copes with possible "leftover" wakeups.
+ * if not. This copes with possible "leftover" wakeups.
*/
void
ProcWaitForSignal(void)
* interval will have elapsed and so this doesn't matter, but there
* are corner cases (involving multi-statement query strings with
* embedded COMMIT or ROLLBACK) where we might re-initialize the
- * statement timeout long after initial receipt of the message.
- * In such cases the enforcement of the statement timeout will be
- * a bit inconsistent. This annoyance is judged not worth the cost
- * of performing an additional gettimeofday() here.
+ * statement timeout long after initial receipt of the message. In
+ * such cases the enforcement of the statement timeout will be a bit
+ * inconsistent. This annoyance is judged not worth the cost of
+ * performing an additional gettimeofday() here.
*/
Assert(!deadlock_timeout_active);
fin_time = GetCurrentStatementStartTimestamp();
TimestampDifference(now, statement_fin_time,
&secs, &usecs);
+
/*
* It's possible that the difference is less than a microsecond;
* ensure we don't cancel, rather than set, the interrupt.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.46 2006/07/14 14:52:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.47 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* increase delay by a random fraction between 1X and 2X */
cur_delay += (int) (cur_delay *
- ((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
+ ((double) random() / (double) MAX_RANDOM_VALUE) + 0.5);
/* wrap back to minimum delay when max is exceeded */
if (cur_delay > MAX_DELAY_MSEC)
cur_delay = MIN_DELAY_MSEC;
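
The spin-delay logic above grows the wait by a random factor between 1X and 2X and wraps back to the minimum once it passes the maximum. A self-contained sketch of that schedule, with illustrative bounds rather than s_lock.c's actual constants:

#include <stdlib.h>

#define MIN_DELAY_MSEC	1		/* illustrative bounds only */
#define MAX_DELAY_MSEC	1000

static int
next_spin_delay(int cur_delay)
{
	/* increase delay by a random fraction between 1X and 2X */
	cur_delay += (int) (cur_delay * ((double) rand() / (double) RAND_MAX) + 0.5);

	/* wrap back to minimum delay when max is exceeded */
	if (cur_delay > MAX_DELAY_MSEC)
		cur_delay = MIN_DELAY_MSEC;

	return cur_delay;
}
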
asm(" .data");
}
#endif /* sun3 */
-
#endif /* not __GNUC__ */
#endif /* HAVE_SPINLOCKS */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.18 2006/08/25 04:06:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/itemptr.c,v 1.19 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
BlockNumber b1 = BlockIdGetBlockNumber(&(arg1->ip_blkid));
BlockNumber b2 = BlockIdGetBlockNumber(&(arg2->ip_blkid));
-
+
if (b1 < b2)
return -1;
else if (b1 > b2)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.121 2006/07/14 05:28:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.122 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* If in bgwriter, absorb pending requests every so often to
* prevent overflow of the fsync request queue. The hashtable
* code does not specify whether entries added by this will be
- * visited by our search, but we don't really care: it's OK if
- * we do, and OK if we don't.
+ * visited by our search, but we don't really care: it's OK if we
+ * do, and OK if we don't.
*/
if (--absorb_counter <= 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.100 2006/07/14 14:52:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.101 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
FreeSpaceMapForgetRel(&rnode);
/*
- * Tell the stats collector to forget it immediately, too. Skip this
- * in recovery mode, since the stats collector likely isn't running
- * (and if it is, pgstat.c will get confused because we aren't a real
- * backend process).
+ * Tell the stats collector to forget it immediately, too. Skip this in
+ * recovery mode, since the stats collector likely isn't running (and if
+ * it is, pgstat.c will get confused because we aren't a real backend
+ * process).
*/
if (!InRecovery)
pgstat_drop_relation(rnode.relNode);
xl_smgr_create *xlrec = (xl_smgr_create *) rec;
appendStringInfo(buf, "file create: %u/%u/%u",
- xlrec->rnode.spcNode, xlrec->rnode.dbNode,
- xlrec->rnode.relNode);
+ xlrec->rnode.spcNode, xlrec->rnode.dbNode,
+ xlrec->rnode.relNode);
}
else if (info == XLOG_SMGR_TRUNCATE)
{
xl_smgr_truncate *xlrec = (xl_smgr_truncate *) rec;
appendStringInfo(buf, "file truncate: %u/%u/%u to %u blocks",
- xlrec->rnode.spcNode, xlrec->rnode.dbNode,
- xlrec->rnode.relNode, xlrec->blkno);
+ xlrec->rnode.spcNode, xlrec->rnode.dbNode,
+ xlrec->rnode.relNode, xlrec->blkno);
}
else
appendStringInfo(buf, "UNKNOWN");
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.92 2006/09/08 15:55:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.93 2006/10/04 00:29:58 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
"commands ignored until end of transaction block")));
/*
- * Now that we know we are in a valid transaction, set snapshot in
- * case needed by function itself or one of the datatype I/O routines.
+ * Now that we know we are in a valid transaction, set snapshot in case
+ * needed by function itself or one of the datatype I/O routines.
*/
ActiveSnapshot = CopySnapshot(GetTransactionSnapshot());
* Begin parsing the buffer contents.
*/
if (PG_PROTOCOL_MAJOR(FrontendProtocol) < 3)
- (void) pq_getmsgstring(msgBuf); /* dummy string */
+ (void) pq_getmsgstring(msgBuf); /* dummy string */
fid = (Oid) pq_getmsgint(msgBuf, 4); /* function oid */
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
if (argsize < 0)
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("invalid argument size %d in function call message",
- argsize)));
+ errmsg("invalid argument size %d in function call message",
+ argsize)));
/* Reset abuf to empty, and insert raw data into it */
abuf.len = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.509 2006/09/13 21:59:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.510 2006/10/04 00:29:58 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
numPFormats = pq_getmsgint(input_message, 2);
if (numPFormats > 0)
{
- int i;
-
+ int i;
+
pformats = (int16 *) palloc(numPFormats * sizeof(int16));
for (i = 0; i < numPFormats; i++)
pformats[i] = pq_getmsgint(input_message, 2);
/* sizeof(ParamListInfoData) includes the first array element */
params = (ParamListInfo) palloc(sizeof(ParamListInfoData) +
- (numParams - 1) * sizeof(ParamExternData));
+ (numParams - 1) *sizeof(ParamExternData));
params->numParams = numParams;
paramno = 0;
if (pstring && pstring != pbuf.data)
pfree(pstring);
}
- else if (pformat == 1) /* binary mode */
+ else if (pformat == 1) /* binary mode */
{
Oid typreceive;
Oid typioparam;
params->params[paramno].value = pval;
params->params[paramno].isnull = isNull;
+
/*
* We mark the params as CONST. This has no effect if we already
* did planning, but if we didn't, it licenses the planner to
numRFormats = pq_getmsgint(input_message, 2);
if (numRFormats > 0)
{
- int i;
+ int i;
rformats = (int16 *) palloc(numRFormats * sizeof(int16));
for (i = 0; i < numRFormats; i++)
* to make use of the concrete parameter values we now have. Because we
* use PARAM_FLAG_CONST, the plan is good only for this set of param
* values, and so we generate the plan in the portal's own memory context
- * where it will be thrown away after use. As in exec_parse_message,
- * we make no attempt to recover planner temporary memory until the end
- * of the operation.
+ * where it will be thrown away after use. As in exec_parse_message, we
+ * make no attempt to recover planner temporary memory until the end of
+ * the operation.
*
- * XXX because the planner has a bad habit of scribbling on its input,
- * we have to make a copy of the parse trees, just in case someone binds
- * and executes an unnamed statement multiple times; this also means that
- * the portal's queryContext becomes its own heap context rather than the
+ * XXX because the planner has a bad habit of scribbling on its input, we
+ * have to make a copy of the parse trees, just in case someone binds and
+ * executes an unnamed statement multiple times; this also means that the
+ * portal's queryContext becomes its own heap context rather than the
* prepared statement's context. FIXME someday
*/
if (pstmt->plan_list == NIL && pstmt->query_list != NIL)
execute_is_fetch = !portal->atStart;
/*
- * We must copy the sourceText and prepStmtName into MessageContext
- * in case the portal is destroyed during finish_xact_command.
- * Can avoid the copy if it's not an xact command, though.
+ * We must copy the sourceText and prepStmtName into MessageContext in
+ * case the portal is destroyed during finish_xact_command. Can avoid the
+ * copy if it's not an xact command, though.
*/
if (is_xact_command)
{
prepStmtName = pstrdup(portal->prepStmtName);
else
prepStmtName = "";
+
/*
* An xact command shouldn't have any parameters, which is a good
* thing because they wouldn't be around after finish_xact_command.
msecs = usecs / 1000;
/*
- * This odd-looking test for log_min_duration_statement being
- * exceeded is designed to avoid integer overflow with very
- * long durations: don't compute secs * 1000 until we've
- * verified it will fit in int.
+ * This odd-looking test for log_min_duration_statement being exceeded
+ * is designed to avoid integer overflow with very long durations:
+ * don't compute secs * 1000 until we've verified it will fit in int.
*/
exceeded = (log_min_duration_statement == 0 ||
(log_min_duration_statement > 0 &&
			appendStringInfoCharMacro(&param_str, '\'');
for (p = pstring; *p; p++)
{
- if (*p == '\'') /* double single quotes */
+ if (*p == '\'') /* double single quotes */
					appendStringInfoCharMacro(&param_str, *p);
				appendStringInfoCharMacro(&param_str, *p);
}
/*
* If we are in aborted transaction state, we can't safely create a result
- * tupledesc, because that needs catalog accesses. Hence, refuse to
+ * tupledesc, because that needs catalog accesses. Hence, refuse to
* Describe statements that return data. (We shouldn't just refuse all
* Describes, since that might break the ability of some clients to issue
* COMMIT or ROLLBACK commands, if they use code that blindly Describes
/*
* If we are in aborted transaction state, we can't run
* SendRowDescriptionMessage(), because that needs catalog accesses.
- * Hence, refuse to Describe portals that return data. (We shouldn't just
+ * Hence, refuse to Describe portals that return data. (We shouldn't just
* refuse all Describes, since that might break the ability of some
* clients to issue COMMIT or ROLLBACK commands, if they use code that
* blindly Describes whatever it does.)
bool
set_plan_disabling_options(const char *arg, GucContext context, GucSource source)
{
- char *tmp = NULL;
+ char *tmp = NULL;
switch (arg[0])
{
- case 's': /* seqscan */
+ case 's': /* seqscan */
tmp = "enable_seqscan";
break;
- case 'i': /* indexscan */
+ case 'i': /* indexscan */
tmp = "enable_indexscan";
break;
- case 'b': /* bitmapscan */
+ case 'b': /* bitmapscan */
tmp = "enable_bitmapscan";
break;
- case 't': /* tidscan */
+ case 't': /* tidscan */
tmp = "enable_tidscan";
break;
- case 'n': /* nestloop */
+ case 'n': /* nestloop */
tmp = "enable_nestloop";
break;
- case 'm': /* mergejoin */
+ case 'm': /* mergejoin */
tmp = "enable_mergejoin";
break;
- case 'h': /* hashjoin */
+ case 'h': /* hashjoin */
tmp = "enable_hashjoin";
break;
}
switch (arg[0])
{
case 'p':
- if (optarg[1] == 'a') /* "parser" */
+ if (optarg[1] == 'a') /* "parser" */
return "log_parser_stats";
- else if (optarg[1] == 'l') /* "planner" */
+ else if (optarg[1] == 'l') /* "planner" */
return "log_planner_stats";
break;
- case 'e': /* "executor" */
+ case 'e': /* "executor" */
return "log_executor_stats";
break;
}
break;
case 's':
+
/*
* Since log options are SUSET, we need to postpone unless
* still in secure context
break;
case 't':
- {
- const char *tmp = get_stats_option_name(optarg);
- if (tmp)
{
- if (ctx == PGC_BACKEND)
- PendingConfigOption(tmp, "true");
+ const char *tmp = get_stats_option_name(optarg);
+
+ if (tmp)
+ {
+ if (ctx == PGC_BACKEND)
+ PendingConfigOption(tmp, "true");
+ else
+ SetConfigOption(tmp, "true", ctx, gucsource);
+ }
else
- SetConfigOption(tmp, "true", ctx, gucsource);
+ errs++;
+ break;
}
- else
- errs++;
- break;
- }
case 'v':
if (secure)
case 'y':
+
/*
* y - special flag passed if backend was forked by a
* postmaster.
}
/*
- * Create a per-backend PGPROC struct in shared memory, except in
- * the EXEC_BACKEND case where this was done in SubPostmasterMain.
- * We must do this before we can use LWLocks (and in the EXEC_BACKEND
- * case we already had to do some stuff with LWLocks).
+ * Create a per-backend PGPROC struct in shared memory, except in the
+ * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do
+ * this before we can use LWLocks (and in the EXEC_BACKEND case we already
+ * had to do some stuff with LWLocks).
*/
#ifdef EXEC_BACKEND
if (!IsUnderPostmaster)
on_proc_exit(log_disconnections, 0);
/*
- * process any libraries that should be preloaded at backend start
- * (this likewise can't be done until GUC settings are complete)
+ * process any libraries that should be preloaded at backend start (this
+ * likewise can't be done until GUC settings are complete)
*/
process_local_preload_libraries();
PG_SETMASK(&UnBlockSig);
if (!ignore_till_sync)
- send_ready_for_query = true; /* initially, or after error */
+ send_ready_for_query = true; /* initially, or after error */
/*
* Non-error queries loop here.
/*
* Note: we may at this point be inside an aborted
- * transaction. We can't throw error for that until
- * we've finished reading the function-call message, so
+ * transaction. We can't throw error for that until we've
+ * finished reading the function-call message, so
* HandleFunctionRequest() must check for it after doing so.
* Be careful not to do anything that assumes we're inside a
* valid transaction here.
"user=%s database=%s host=%s%s%s",
hours, minutes, seconds, msecs,
port->user_name, port->database_name, port->remote_host,
- port->remote_port[0] ? " port=" : "", port->remote_port)));
+ port->remote_port[0] ? " port=" : "", port->remote_port)));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.110 2006/09/03 03:19:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.111 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* PORTAL_ONE_SELECT and PORTAL_UTIL_SELECT need only consider the
- * single-Query-struct case, since there are no rewrite rules that
- * can add auxiliary queries to a SELECT or a utility command.
+ * single-Query-struct case, since there are no rewrite rules that can add
+ * auxiliary queries to a SELECT or a utility command.
*/
if (list_length(parseTrees) == 1)
{
/*
* PORTAL_ONE_RETURNING has to allow auxiliary queries added by rewrite.
- * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query
- * and it has a RETURNING list.
+ * Choose PORTAL_ONE_RETURNING if there is exactly one canSetTag query and
+ * it has a RETURNING list.
*/
nSetTag = 0;
foreach(lc, parseTrees)
if (query->canSetTag)
{
if (++nSetTag > 1)
- return PORTAL_MULTI_QUERY; /* no need to look further */
+ return PORTAL_MULTI_QUERY; /* no need to look further */
if (query->returningList == NIL)
- return PORTAL_MULTI_QUERY; /* no need to look further */
+ return PORTAL_MULTI_QUERY; /* no need to look further */
}
}
if (nSetTag == 1)
if (portal->cursorOptions & CURSOR_OPT_SCROLL)
eflags = EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD;
else
- eflags = 0; /* default run-to-completion flags */
+ eflags = 0; /* default run-to-completion flags */
/*
* Call ExecutorStart to prepare the plan for execution
case PORTAL_ONE_RETURNING:
/*
- * We don't start the executor until we are told to run
- * the portal. We do need to set up the result tupdesc.
+ * We don't start the executor until we are told to run the
+ * portal. We do need to set up the result tupdesc.
*/
portal->tupDesc =
ExecCleanTypeFromTL((PortalGetPrimaryQuery(portal))->returningList, false);
case PORTAL_UTIL_SELECT:
/*
- * If we have not yet run the command, do so,
- * storing its results in the portal's tuplestore.
+ * If we have not yet run the command, do so, storing its
+ * results in the portal's tuplestore.
*/
if (!portal->holdStore)
FillPortalStore(portal);
switch (portal->strategy)
{
case PORTAL_ONE_RETURNING:
+
/*
- * Run the portal to completion just as for the default MULTI_QUERY
- * case, but send the primary query's output to the tuplestore.
- * Auxiliary query outputs are discarded.
+ * Run the portal to completion just as for the default
+ * MULTI_QUERY case, but send the primary query's output to the
+ * tuplestore. Auxiliary query outputs are discarded.
*/
PortalRunMulti(portal, treceiver, None_Receiver, completionTag);
/* Override default completion tag with actual command result */
case PORTAL_UTIL_SELECT:
/*
- * If we have not yet run the command, do so,
- * storing its results in the portal's tuplestore.
+ * If we have not yet run the command, do so, storing its
+ * results in the portal's tuplestore.
*/
if (!portal->holdStore)
FillPortalStore(portal);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.268 2006/09/07 22:52:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.269 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case T_CopyStmt:
{
- uint64 processed = DoCopy((CopyStmt *) parsetree);
+ uint64 processed = DoCopy((CopyStmt *) parsetree);
if (completionTag)
snprintf(completionTag, COMPLETION_TAG_BUFSIZE,
stmt->unique,
stmt->primary,
stmt->isconstraint,
- false, /* is_alter_table */
- true, /* check_rights */
- false, /* skip_build */
- false, /* quiet */
+ false, /* is_alter_table */
+ true, /* check_rights */
+ false, /* skip_build */
+ false, /* quiet */
stmt->concurrent); /* concurrent */
}
break;
case T_SelectStmt:
if (((SelectStmt *) parsetree)->into)
- lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
+ lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
else
lev = LOGSTMT_ALL;
break;
case T_ExplainStmt:
{
- ExplainStmt *stmt = (ExplainStmt *) parsetree;
+ ExplainStmt *stmt = (ExplainStmt *) parsetree;
/* Look through an EXPLAIN ANALYZE to the contained stmt */
if (stmt->analyze)
break;
case T_ReindexStmt:
- lev = LOGSTMT_ALL; /* should this be DDL? */
+ lev = LOGSTMT_ALL; /* should this be DDL? */
break;
case T_CreateConversionStmt:
case T_PrepareStmt:
{
- PrepareStmt *stmt = (PrepareStmt *) parsetree;
+ PrepareStmt *stmt = (PrepareStmt *) parsetree;
/* Look through a PREPARE to the contained stmt */
return GetCommandLogLevel((Node *) stmt->query);
case T_ExecuteStmt:
{
- ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
+ ExecuteStmt *stmt = (ExecuteStmt *) parsetree;
PreparedStatement *pstmt;
- ListCell *l;
+ ListCell *l;
/* Look through an EXECUTE to the referenced stmt(s) */
lev = LOGSTMT_ALL;
{
foreach(l, pstmt->query_list)
{
- Query *query = (Query *) lfirst(l);
+ Query *query = (Query *) lfirst(l);
LogStmtLevel stmt_lev;
stmt_lev = GetQueryLogLevel(query);
break;
case T_Query:
+
/*
* In complicated situations (eg, EXPLAIN ANALYZE in an extended
- * Query protocol), we might find an already-analyzed query
- * within a utility statement. Cope.
+ * Query protocol), we might find an already-analyzed query within
+ * a utility statement. Cope.
*/
lev = GetQueryLogLevel((Query *) parsetree);
break;
{
case CMD_SELECT:
if (parsetree->into != NULL)
- lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
+ lev = LOGSTMT_DDL; /* CREATE AS, SELECT INTO */
else
lev = LOGSTMT_ALL;
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.132 2006/09/29 21:22:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.133 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
dim[0] += addedbefore;
lb[0] = indx[0];
if (addedbefore > 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
}
if (indx[0] >= (dim[0] + lb[0]))
{
addedafter = indx[0] - (dim[0] + lb[0]) + 1;
dim[0] += addedafter;
if (addedafter > 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
}
}
else
{
/*
- * XXX currently we do not support extending multi-dimensional
- * arrays during assignment
+ * XXX currently we do not support extending multi-dimensional arrays
+ * during assignment
*/
for (i = 0; i < ndim; i++)
{
if (lowerIndx[0] < lb[0])
{
if (upperIndx[0] < lb[0] - 1)
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
addedbefore = lb[0] - lowerIndx[0];
dim[0] += addedbefore;
lb[0] = lowerIndx[0];
if (upperIndx[0] >= (dim[0] + lb[0]))
{
if (lowerIndx[0] > (dim[0] + lb[0]))
- newhasnulls = true; /* will insert nulls */
+ newhasnulls = true; /* will insert nulls */
addedafter = upperIndx[0] - (dim[0] + lb[0]) + 1;
dim[0] += addedafter;
}
else
{
/*
- * XXX currently we do not support extending multi-dimensional
- * arrays during assignment
+ * XXX currently we do not support extending multi-dimensional arrays
+ * during assignment
*/
for (i = 0; i < nSubscripts; i++)
{
if (lowerIndx[i] > upperIndx[i])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
if (lowerIndx[i] < lb[i] ||
upperIndx[i] >= (dim[i] + lb[i]))
ereport(ERROR,
if (lowerIndx[i] > upperIndx[i])
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
- errmsg("upper bound cannot be less than lower bound")));
+ errmsg("upper bound cannot be less than lower bound")));
}
}
/*
* We arrange to look up the equality function only once per series of
* calls, assuming the element type doesn't change underneath us. The
- * typcache is used so that we have no memory leakage when being used
- * as an index support function.
+ * typcache is used so that we have no memory leakage when being used as
+ * an index support function.
*/
typentry = (TypeCacheEntry *) *fn_extra;
if (typentry == NULL ||
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(element_type))));
*fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
}
/*
- * We assume that the comparison operator is strict, so a NULL
- * can't match anything. XXX this diverges from the "NULL=NULL"
- * behavior of array_eq, should we act like that?
+ * We assume that the comparison operator is strict, so a NULL can't
+ * match anything. XXX this diverges from the "NULL=NULL" behavior of
+ * array_eq, should we act like that?
*/
if (isnull1)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.171 2006/09/16 20:14:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.172 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
static datetkn *timezonetktbl = NULL;
-static int sztimezonetktbl = 0;
+static int sztimezonetktbl = 0;
static const datetkn datetktbl[] = {
/* text, token, lexval */
{YESTERDAY, RESERV, DTK_YESTERDAY}, /* yesterday midnight */
};
-static int szdatetktbl = sizeof datetktbl / sizeof datetktbl[0];
+static int szdatetktbl = sizeof datetktbl / sizeof datetktbl[0];
static datetkn deltatktbl[] = {
/* text, token, lexval */
{"yrs", UNITS, DTK_YEAR}, /* "years" relative */
};
-static int szdeltatktbl = sizeof deltatktbl / sizeof deltatktbl[0];
+static int szdeltatktbl = sizeof deltatktbl / sizeof deltatktbl[0];
static const datetkn *datecache[MAXDATEFIELDS] = {NULL};
if (*cp == '/')
{
ftype[nf] = DTK_TZ;
- /* set the first character of the region to upper case
- * again*/
+
+ /*
+ * set the first character of the region to upper case
+ * again
+ */
field[nf][0] = pg_toupper((unsigned char) field[nf][0]);
- /* we have seen "Region/" of a POSIX timezone, continue to
- * read the City part */
- do {
+
+ /*
+ * we have seen "Region/" of a POSIX timezone, continue to
+ * read the City part
+ */
+ do
+ {
APPEND_CHAR(bufp, bufend, *cp++);
/* there is for example America/New_York */
} while (isalpha((unsigned char) *cp) || *cp == '_');
if (zicTzFnum != -1)
{
- Datum tsTz;
- Timestamp timestamp;
+ Datum tsTz;
+ Timestamp timestamp;
+
		tm2timestamp(tm, *fsec, NULL, &timestamp);
tsTz = DirectFunctionCall2(timestamp_zone,
- DirectFunctionCall1(textin,
- CStringGetDatum(field[zicTzFnum])),
- TimestampGetDatum(timestamp));
+ DirectFunctionCall1(textin,
+ CStringGetDatum(field[zicTzFnum])),
+ TimestampGetDatum(timestamp));
timestamp2tm(DatumGetTimestampTz(tsTz), tzp, tm, fsec, NULL, NULL);
fmask &= ~DTK_M(TZ);
}
tm->tm_mday += val * 7;
if (fval != 0)
{
- int extra_days;
+ int extra_days;
+
fval *= 7;
extra_days = (int32) fval;
tm->tm_mday += extra_days;
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
tm->tm_mon += val;
if (fval != 0)
{
- int day;
+ int day;
+
fval *= DAYS_PER_MONTH;
day = fval;
tm->tm_mday += day;
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
void
InstallTimeZoneAbbrevs(tzEntry *abbrevs, int n)
{
- datetkn *newtbl;
+ datetkn *newtbl;
int i;
/*
Datum
pg_timezone_abbrevs(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- int *pindex;
- Datum result;
- HeapTuple tuple;
- Datum values[3];
- bool nulls[3];
- char buffer[TOKMAXLEN + 1];
- unsigned char *p;
- struct pg_tm tm;
- Interval *resInterval;
+ FuncCallContext *funcctx;
+ int *pindex;
+ Datum result;
+ HeapTuple tuple;
+ Datum values[3];
+ bool nulls[3];
+ char buffer[TOKMAXLEN + 1];
+ unsigned char *p;
+ struct pg_tm tm;
+ Interval *resInterval;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
- MemoryContext oldcontext;
+ TupleDesc tupdesc;
+ MemoryContext oldcontext;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
MemSet(nulls, 0, sizeof(nulls));
/*
- * Convert name to text, using upcasing conversion that is the inverse
- * of what ParseDateTime() uses.
+ * Convert name to text, using upcasing conversion that is the inverse of
+ * what ParseDateTime() uses.
*/
strncpy(buffer, timezonetktbl[*pindex].token, TOKMAXLEN);
buffer[TOKMAXLEN] = '\0'; /* may not be null-terminated */
Datum
pg_timezone_names(PG_FUNCTION_ARGS)
{
- MemoryContext oldcontext;
- FuncCallContext *funcctx;
- pg_tzenum *tzenum;
- pg_tz *tz;
- Datum result;
- HeapTuple tuple;
- Datum values[4];
- bool nulls[4];
+ MemoryContext oldcontext;
+ FuncCallContext *funcctx;
+ pg_tzenum *tzenum;
+ pg_tz *tz;
+ Datum result;
+ HeapTuple tuple;
+ Datum values[4];
+ bool nulls[4];
int tzoff;
- struct pg_tm tm;
+ struct pg_tm tm;
fsec_t fsec;
char *tzn;
- Interval *resInterval;
- struct pg_tm itm;
+ Interval *resInterval;
+ struct pg_tm itm;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- TupleDesc tupdesc;
+ TupleDesc tupdesc;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
MemSet(nulls, 0, sizeof(nulls));
values[0] = DirectFunctionCall1(textin,
- CStringGetDatum(pg_get_timezone_name(tz)));
+ CStringGetDatum(pg_get_timezone_name(tz)));
values[1] = DirectFunctionCall1(textin,
CStringGetDatum(tzn ? tzn : ""));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.3 2006/08/04 21:33:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/domains.c,v 1.4 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Set up value to be returned by CoerceToDomainValue
* nodes. Unlike ExecEvalCoerceToDomain, this econtext
- * couldn't be shared with anything else, so no need
- * to save and restore fields.
+ * couldn't be shared with anything else, so no need to
+ * save and restore fields.
*/
econtext->domainValue_datum = value;
econtext->domainValue_isNull = isnull;
/*
* Before exiting, call any shutdown callbacks and reset econtext's
- * per-tuple memory. This avoids leaking non-memory resources,
- * if anything in the expression(s) has any.
+ * per-tuple memory. This avoids leaking non-memory resources, if
+ * anything in the expression(s) has any.
*/
if (econtext)
ReScanExprContext(econtext);
Datum value;
/*
- * Since domain_in is not strict, we have to check for null inputs.
- * The typioparam argument should never be null in normal system usage,
- * but it could be null in a manual invocation --- if so, just return null.
+ * Since domain_in is not strict, we have to check for null inputs. The
+ * typioparam argument should never be null in normal system usage, but it
+ * could be null in a manual invocation --- if so, just return null.
*/
if (PG_ARGISNULL(0))
string = NULL;
domainType = PG_GETARG_OID(1);
/*
- * We arrange to look up the needed info just once per series of
- * calls, assuming the domain type doesn't change underneath us.
+ * We arrange to look up the needed info just once per series of calls,
+ * assuming the domain type doesn't change underneath us.
*/
my_extra = (DomainIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
Datum value;
/*
- * Since domain_recv is not strict, we have to check for null inputs.
- * The typioparam argument should never be null in normal system usage,
- * but it could be null in a manual invocation --- if so, just return null.
+ * Since domain_recv is not strict, we have to check for null inputs. The
+ * typioparam argument should never be null in normal system usage, but it
+ * could be null in a manual invocation --- if so, just return null.
*/
if (PG_ARGISNULL(0))
buf = NULL;
domainType = PG_GETARG_OID(1);
/*
- * We arrange to look up the needed info just once per series of
- * calls, assuming the domain type doesn't change underneath us.
+ * We arrange to look up the needed info just once per series of calls,
+ * assuming the domain type doesn't change underneath us.
*/
my_extra = (DomainIOData *) fcinfo->flinfo->fn_extra;
if (my_extra == NULL)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.128 2006/07/28 18:33:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.129 2006/10/04 00:29:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define HAVE_FINITE 1
#endif
-/* Visual C++ etc lacks NAN, and won't accept 0.0/0.0. NAN definition from
+/* Visual C++ etc lacks NAN, and won't accept 0.0/0.0. NAN definition from
* http://msdn.microsoft.com/library/default.asp?url=/library/en-us/vclang/html/vclrfNotNumberNANItems.asp
*/
#if defined(WIN32) && !defined(NAN)
static const uint32 nan[2] = {0xffffffff, 0x7fffffff};
+
#define NAN (*(const double *) nan)
#endif
* in that order. Note that Y is the first argument to the aggregates!
*
* It might seem attractive to optimize this by having multiple accumulator
- * functions that only calculate the sums actually needed. But on most
+ * functions that only calculate the sums actually needed. But on most
* modern machines, a couple of extra floating-point multiplies will be
* insignificant compared to the other per-tuple overhead, so I've chosen
* to minimize code space instead.
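
As the comment notes, one accumulator carrying N, sum(X), sum(X^2), sum(Y), sum(Y^2) and sum(X*Y) is enough for every regression aggregate. A hedged sketch (hypothetical names, not the transition-function API) that accumulates those sums and derives the slope the same way float8_regr_slope does; the backend returns SQL NULL where NAN appears here:

#include <math.h>

typedef struct
{
	double		N, sumX, sumX2, sumY, sumY2, sumXY;
} RegrState;

/* Fold one (Y, X) pair into the six running sums; note Y is the first argument. */
static void
regr_accum(RegrState *st, double y, double x)
{
	st->N += 1;
	st->sumX += x;
	st->sumX2 += x * x;
	st->sumY += y;
	st->sumY2 += y * y;
	st->sumXY += x * y;
}

/* slope = (N*sumXY - sumX*sumY) / (N*sumX2 - sumX*sumX) */
static double
regr_slope(const RegrState *st)
{
	double		numeratorX = st->N * st->sumX2 - st->sumX * st->sumX;
	double		numeratorXY = st->N * st->sumXY - st->sumX * st->sumY;

	if (numeratorX <= 0)
		return NAN;				/* the real aggregate returns NULL here */
	return numeratorXY / numeratorX;
}
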
float8 newvalY = PG_GETARG_FLOAT8(1);
float8 newvalX = PG_GETARG_FLOAT8(2);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY;
transvalues = check_float8_array(transarray, "float8_regr_accum", 6);
N = transvalues[0];
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_regr_sxy", 6);
N = transvalues[0];
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_covar_pop", 6);
N = transvalues[0];
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumY, sumXY, numerator;
+ float8 N,
+ sumX,
+ sumY,
+ sumXY,
+ numerator;
transvalues = check_float8_array(transarray, "float8_covar_samp", 6);
N = transvalues[0];
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY, numeratorX,
- numeratorY, numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY,
+ numeratorX,
+ numeratorY,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_corr", 6);
N = transvalues[0];
numeratorXY = N * sumXY - sumX * sumY;
if (numeratorX <= 0 || numeratorY <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(sqrt((numeratorXY * numeratorXY) /
(numeratorX * numeratorY)));
}
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumY2, sumXY, numeratorX,
- numeratorY, numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumY2,
+ sumXY,
+ numeratorX,
+ numeratorY,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_regr_r2", 6);
N = transvalues[0];
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumXY, numeratorX,
- numeratorXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumXY,
+ numeratorX,
+ numeratorXY;
transvalues = check_float8_array(transarray, "float8_regr_slope", 6);
N = transvalues[0];
numeratorXY = N * sumXY - sumX * sumY;
if (numeratorX <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(numeratorXY / numeratorX);
}
{
ArrayType *transarray = PG_GETARG_ARRAYTYPE_P(0);
float8 *transvalues;
- float8 N, sumX, sumX2, sumY, sumXY, numeratorX,
- numeratorXXY;
+ float8 N,
+ sumX,
+ sumX2,
+ sumY,
+ sumXY,
+ numeratorX,
+ numeratorXXY;
transvalues = check_float8_array(transarray, "float8_regr_intercept", 6);
N = transvalues[0];
numeratorXXY = sumY * sumX2 - sumX * sumXY;
if (numeratorX <= 0)
PG_RETURN_NULL();
-
+
PG_RETURN_FLOAT8(numeratorXXY / numeratorX);
}
double tmpres = pow(absx, (double) 1.0 / (double) 3.0);
/*
- * The result is somewhat inaccurate --- not really pow()'s fault,
- * as the exponent it's handed contains roundoff error. We can improve
- * the accuracy by doing one iteration of Newton's formula. Beware of
- * zero input however.
+ * The result is somewhat inaccurate --- not really pow()'s fault, as the
+ * exponent it's handed contains roundoff error. We can improve the
+ * accuracy by doing one iteration of Newton's formula. Beware of zero
+ * input however.
*/
if (tmpres > 0.0)
- tmpres -= (tmpres - absx/(tmpres*tmpres)) / (double) 3.0;
+ tmpres -= (tmpres - absx / (tmpres * tmpres)) / (double) 3.0;
return isneg ? -tmpres : tmpres;
}
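
The refinement above is one Newton step for t^3 = absx, i.e. t -= (t - absx/t^2)/3, applied to pow()'s initial estimate. A standalone sketch assuming only the C library:

#include <math.h>

/* Cube root via pow() plus one Newton refinement, per the comment above. */
static double
cbrt_refined(double x)
{
	int			isneg = (x < 0.0);
	double		absx = fabs(x);
	double		t = pow(absx, 1.0 / 3.0);

	/* one Newton step: t -= (t - absx/t^2) / 3; guard against zero input */
	if (t > 0.0)
		t -= (t - absx / (t * t)) / 3.0;

	return isneg ? -t : t;
}
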
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.112 2006/09/10 22:54:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.113 2006/10/04 00:29:59 momjian Exp $
*
*
* Portions Copyright (c) 1999-2006, PostgreSQL Global Development Group
#include "utils/numeric.h"
#include "utils/pg_locale.h"
-#define _(x) gettext((x))
+#define _(x) gettext((x))
/* ----------
* Routines type
len = strspace_len(str);
p += len;
-
+
while (*p && isdigit((unsigned char) *p) && len <= DCH_MAX_ITEM_SIZ)
{
len++;
{
sprintf(inout, "%0*d", S_FM(suf) ? 0 : 2,
tm->tm_hour % (HOURS_PER_DAY / 2) == 0 ? 12 :
- tm->tm_hour % (HOURS_PER_DAY / 2));
+ tm->tm_hour % (HOURS_PER_DAY / 2));
if (S_THth(suf))
str_numth(p_inout, inout, 0);
return strlen(p_inout);
tmfc = (TmFromChar *) data;
/*
- * In the FROM-char there is no difference between "January" or "JANUARY" or
- * "january", all is before search convert to "first-upper". This
+ * In the FROM-char there is no difference between "January" or "JANUARY"
+ * or "january", all is before search convert to "first-upper". This
* convention is used for MONTH, MON, DAY, DY
*/
if (!is_to_char)
static char *
localize_month_full(int index)
{
- char *m = NULL;
+ char *m = NULL;
switch (index)
{
static char *
localize_month(int index)
{
- char *m = NULL;
+ char *m = NULL;
switch (index)
{
static char *
localize_day_full(int index)
{
- char *d = NULL;
+ char *d = NULL;
switch (index)
{
static char *
localize_day(int index)
{
- char *d = NULL;
+ char *d = NULL;
switch (index)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.74 2006/06/12 16:28:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.75 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int2vector *result;
/*
- * Normally one would call array_recv() using DirectFunctionCall3,
- * but that does not work since array_recv wants to cache some data
- * using fcinfo->flinfo->fn_extra. So we need to pass it our own
- * flinfo parameter.
+ * Normally one would call array_recv() using DirectFunctionCall3, but
+ * that does not work since array_recv wants to cache some data using
+ * fcinfo->flinfo->fn_extra. So we need to pass it our own flinfo
+ * parameter.
*/
InitFunctionCallInfoData(locfcinfo, fcinfo->flinfo, 3, NULL, NULL);
int32 result;
#ifdef WIN32
+
/*
- * Win32 doesn't throw a catchable exception for
- * SELECT -2147483648 * (-1); -- INT_MIN
+ * Win32 doesn't throw a catchable exception for SELECT -2147483648 *
+ * (-1); -- INT_MIN
*/
if (arg2 == -1 && arg1 == INT_MIN)
ereport(ERROR,
errmsg("division by zero")));
#ifdef WIN32
+
/*
- * Win32 doesn't throw a catchable exception for
- * SELECT -2147483648 / (-1); -- INT_MIN
+ * Win32 doesn't throw a catchable exception for SELECT -2147483648 /
+ * (-1); -- INT_MIN
*/
if (arg2 == -1 && arg1 == INT_MIN)
ereport(ERROR,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.61 2006/07/28 18:33:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.62 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* These functions are exactly like int8inc but are used for aggregates that
- * count only non-null values. Since the functions are declared strict,
+ * count only non-null values. Since the functions are declared strict,
* the null checks happen before we ever get here, and all we need do is
* increment the state value. We could actually make these pg_proc entries
* point right at int8inc, but then the opr_sanity regression test would
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.65 2006/09/04 18:32:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/like.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Formerly we had a routine iwchareq() here that tried to do case-insensitive
- * comparison of multibyte characters. It did not work at all, however,
+ * comparison of multibyte characters. It did not work at all, however,
* because it relied on tolower() which has a single-byte API ... and
* towlower() wouldn't be much better since we have no suitably cheap way
* of getting a single character transformed to the system's wchar_t format.
* So now, we just downcase the strings using lower() and apply regular LIKE
- * comparison. This should be revisited when we install better locale support.
+ * comparison. This should be revisited when we install better locale support.
*
* Note that MBMatchText and MBMatchTextIC do exactly the same thing now.
* Is it worth refactoring to avoid duplicated code? They might become
else
{
/* Force inputs to lower case to achieve case insensitivity */
- text *strtext;
+ text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
NameGetDatum(str)));
strtext = DatumGetTextP(DirectFunctionCall1(lower,
- PointerGetDatum(strtext)));
+ PointerGetDatum(strtext)));
pat = DatumGetTextP(DirectFunctionCall1(lower,
PointerGetDatum(pat)));
else
{
/* Force inputs to lower case to achieve case insensitivity */
- text *strtext;
+ text *strtext;
strtext = DatumGetTextP(DirectFunctionCall1(name_text,
NameGetDatum(str)));
strtext = DatumGetTextP(DirectFunctionCall1(lower,
- PointerGetDatum(strtext)));
+ PointerGetDatum(strtext)));
pat = DatumGetTextP(DirectFunctionCall1(lower,
PointerGetDatum(pat)));
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.26 2006/09/22 23:20:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/lockfuncs.c,v 1.27 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
nulls[10] = 'n';
values[11] = DirectFunctionCall1(textin,
- CStringGetDatum(GetLockmodeName(LOCK_LOCKMETHOD(*lock),
- mode)));
+ CStringGetDatum(GetLockmodeName(LOCK_LOCKMETHOD(*lock),
+ mode)));
values[12] = BoolGetDatum(granted);
tuple = heap_formtuple(funcctx->tuple_desc, values, nulls);
}
/*
- * pg_advisory_unlock(int8) - release exclusive lock on an int8 key
+ * pg_advisory_unlock(int8) - release exclusive lock on an int8 key
*
* Returns true if successful, false if lock was not held
*/
}
/*
- * pg_advisory_unlock(int4, int4) - release exclusive lock on 2 int4 keys
+ * pg_advisory_unlock(int4, int4) - release exclusive lock on 2 int4 keys
*
* Returns true if successful, false if lock was not held
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.53 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.54 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
float8 endtime;
/*
- * We break the requested sleep into segments of no more than 1 second,
- * to put an upper bound on how long it will take us to respond to a
- * cancel or die interrupt. (Note that pg_usleep is interruptible by
- * signals on some platforms but not others.) Also, this method avoids
- * exposing pg_usleep's upper bound on allowed delays.
+ * We break the requested sleep into segments of no more than 1 second, to
+ * put an upper bound on how long it will take us to respond to a cancel
+ * or die interrupt. (Note that pg_usleep is interruptible by signals on
+ * some platforms but not others.) Also, this method avoids exposing
+ * pg_usleep's upper bound on allowed delays.
*
- * By computing the intended stop time initially, we avoid accumulation
- * of extra delay across multiple sleeps. This also ensures we won't
- * delay less than the specified time if pg_usleep is interrupted
- * by other signals such as SIGHUP.
+ * By computing the intended stop time initially, we avoid accumulation of
+ * extra delay across multiple sleeps. This also ensures we won't delay
+ * less than the specified time if pg_usleep is interrupted by other
+ * signals such as SIGHUP.
*/
#ifdef HAVE_INT64_TIMESTAMP
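
The comment above describes splitting the requested sleep into segments of at most one second and fixing the intended stop time up front, so interruptions and extra wakeups cannot stretch or shorten the total delay. A minimal sketch of that approach using POSIX nanosleep() rather than pg_usleep(); everything beyond what the comment states is an assumption, and the time(NULL)-based clock is deliberately coarse:

#include <time.h>

static void
sleep_in_segments(double seconds)
{
	double		endtime = (double) time(NULL) + seconds;	/* stop time, computed once */

	for (;;)
	{
		double		remaining = endtime - (double) time(NULL);
		struct timespec ts;

		if (remaining <= 0.0)
			break;
		if (remaining > 1.0)
			remaining = 1.0;	/* cap each segment at one second */

		ts.tv_sec = (time_t) remaining;
		ts.tv_nsec = (long) ((remaining - (double) ts.tv_sec) * 1e9);
		nanosleep(&ts, NULL);	/* may return early on a signal; loop retries */
	}
}
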
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.65 2006/02/11 20:39:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
* family, bits, is_cidr, address length, address in network byte order.
*
* Presence of is_cidr is largely for historical reasons, though it might
- * allow some code-sharing on the client side. We send it correctly on
+ * allow some code-sharing on the client side. We send it correctly on
* output, but ignore the value on input.
*/
static inet *
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid address family in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid bits in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
ip_bits(addr) = bits;
if (nb != ip_addrsize(addr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- /* translator: %s is inet or cidr */
+ /* translator: %s is inet or cidr */
errmsg("invalid length in external \"%s\" value",
is_cidr ? "cidr" : "inet")));
VARATT_SIZEP(addr) = VARHDRSZ +
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = ~pip[nb];
errmsg("cannot AND inet values of different sizes")));
else
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = pip[nb] & pip2[nb];
errmsg("cannot OR inet values of different sizes")));
else
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- unsigned char *pdst = ip_addr(dst);
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ unsigned char *pdst = ip_addr(dst);
while (nb-- > 0)
pdst[nb] = pip[nb] | pip2[nb];
dst = (inet *) palloc0(VARHDRSZ + sizeof(inet_struct));
{
- int nb = ip_addrsize(ip);
- unsigned char *pip = ip_addr(ip);
- unsigned char *pdst = ip_addr(dst);
- int carry = 0;
+ int nb = ip_addrsize(ip);
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pdst = ip_addr(dst);
+ int carry = 0;
while (nb-- > 0)
{
carry = pip[nb] + (int) (addend & 0xFF) + carry;
pdst[nb] = (unsigned char) (carry & 0xFF);
carry >>= 8;
+
/*
* We have to be careful about right-shifting addend because
- * right-shift isn't portable for negative values, while
- * simply dividing by 256 doesn't work (the standard rounding
- * is in the wrong direction, besides which there may be machines
- * out there that round the wrong way). So, explicitly clear
- * the low-order byte to remove any doubt about the correct
- * result of the division, and then divide rather than shift.
+ * right-shift isn't portable for negative values, while simply
+ * dividing by 256 doesn't work (the standard rounding is in the
+ * wrong direction, besides which there may be machines out there
+ * that round the wrong way). So, explicitly clear the low-order
+ * byte to remove any doubt about the correct result of the
+ * division, and then divide rather than shift.
*/
addend &= ~((int64) 0xFF);
addend /= 0x100;
}
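To see the clear-then-divide trick in isolation, here is a small standalone sketch (not the backend code) that peels one low-order byte off a signed 64-bit value exactly as the comment above describes:

#include <stdint.h>
#include <stdio.h>

/* Return addend shifted down one byte, portably even for negative values:
 * clear the low-order byte first so the division is exact, then divide
 * instead of right-shifting. */
static int64_t
peel_low_byte(int64_t addend, unsigned char *lowbyte)
{
    *lowbyte = (unsigned char) (addend & 0xFF);
    addend &= ~((int64_t) 0xFF);
    return addend / 0x100;
}

int
main(void)
{
    unsigned char b;
    int64_t       a = -2;           /* bit pattern ...FFFE */

    a = peel_low_byte(a, &b);
    printf("low byte 0x%02X, remaining %lld\n", b, (long long) a);  /* 0xFE, -1 */
    return 0;
}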
+
/*
- * At this point we should have addend and carry both zero if
- * original addend was >= 0, or addend -1 and carry 1 if original
- * addend was < 0. Anything else means overflow.
+ * At this point we should have addend and carry both zero if original
+ * addend was >= 0, or addend -1 and carry 1 if original addend was <
+ * 0. Anything else means overflow.
*/
if (!((addend == 0 && carry == 0) ||
(addend == -1 && carry == 1)))
Datum
inetpl(PG_FUNCTION_ARGS)
{
- inet *ip = PG_GETARG_INET_P(0);
- int64 addend = PG_GETARG_INT64(1);
+ inet *ip = PG_GETARG_INET_P(0);
+ int64 addend = PG_GETARG_INT64(1);
PG_RETURN_INET_P(internal_inetpl(ip, addend));
}
Datum
inetmi_int8(PG_FUNCTION_ARGS)
{
- inet *ip = PG_GETARG_INET_P(0);
- int64 addend = PG_GETARG_INT64(1);
+ inet *ip = PG_GETARG_INET_P(0);
+ int64 addend = PG_GETARG_INT64(1);
PG_RETURN_INET_P(internal_inetpl(ip, -addend));
}
else
{
/*
- * We form the difference using the traditional complement,
- * increment, and add rule, with the increment part being handled
- * by starting the carry off at 1. If you don't think integer
- * arithmetic is done in two's complement, too bad.
+ * We form the difference using the traditional complement, increment,
+ * and add rule, with the increment part being handled by starting the
+ * carry off at 1. If you don't think integer arithmetic is done in
+ * two's complement, too bad.
*/
- int nb = ip_addrsize(ip);
- int byte = 0;
- unsigned char *pip = ip_addr(ip);
- unsigned char *pip2 = ip_addr(ip2);
- int carry = 1;
+ int nb = ip_addrsize(ip);
+ int byte = 0;
+ unsigned char *pip = ip_addr(ip);
+ unsigned char *pip2 = ip_addr(ip2);
+ int carry = 1;
while (nb-- > 0)
{
- int lobyte;
+ int lobyte;
carry = pip[nb] + (~pip2[nb] & 0xFF) + carry;
lobyte = carry & 0xFF;
else
{
/*
- * Input wider than int64: check for overflow. All bytes
- * to the left of what will fit should be 0 or 0xFF,
- * depending on sign of the now-complete result.
+ * Input wider than int64: check for overflow. All bytes to
+ * the left of what will fit should be 0 or 0xFF, depending on
+ * sign of the now-complete result.
*/
if ((res < 0) ? (lobyte != 0xFF) : (lobyte != 0))
ereport(ERROR,
}
/*
- * If input is narrower than int64, overflow is not possible, but
- * we have to do proper sign extension.
+ * If input is narrower than int64, overflow is not possible, but we
+ * have to do proper sign extension.
*/
if (carry == 0 && byte < sizeof(int64))
res |= ((int64) -1) << (byte * 8);
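The complement-increment-add rule and the final sign extension can be shown end to end in a self-contained sketch, with plain byte arrays standing in for the inet address data; the overflow check for inputs wider than int64 is omitted here, so this is an illustration rather than the backend function:

#include <stdint.h>
#include <stdio.h>

/* Difference of two big-endian byte strings as a signed 64-bit value,
 * formed by the complement-and-add rule with the carry started at 1.
 * A final carry of 0 means the result is negative, so if the input was
 * narrower than int64 the high bytes must be sign-extended. */
static int64_t
byte_diff(const unsigned char *a, const unsigned char *b, int nb)
{
    int64_t res = 0;
    int     byte = 0;
    int     carry = 1;

    while (nb-- > 0)
    {
        int lobyte;

        carry = a[nb] + (~b[nb] & 0xFF) + carry;
        lobyte = carry & 0xFF;
        if (byte < (int) sizeof(int64_t))
            res |= ((int64_t) lobyte) << (byte * 8);
        byte++;
        carry >>= 8;
    }
    if (carry == 0 && byte < (int) sizeof(int64_t))
        res |= ((int64_t) -1) << (byte * 8);
    return res;
}

int
main(void)
{
    unsigned char x[4] = {192, 168, 0, 2};
    unsigned char y[4] = {192, 168, 0, 1};

    printf("%lld %lld\n",
           (long long) byte_diff(x, y, 4),      /* prints 1 */
           (long long) byte_diff(y, x, 4));     /* prints -1 */
    return 0;
}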
* Copyright (c) 1998-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.95 2006/10/03 21:25:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.96 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int cmp_numerics(Numeric num1, Numeric num2);
static int cmp_var(NumericVar *var1, NumericVar *var2);
-static int cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
- int var1weight, int var1sign,
- const NumericDigit *var2digits, int var2ndigits,
- int var2weight, int var2sign);
+static int cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
+ int var1weight, int var1sign,
+ const NumericDigit *var2digits, int var2ndigits,
+ int var2weight, int var2sign);
static void add_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void sub_var(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void mul_var(NumericVar *var1, NumericVar *var2, NumericVar *result,
int rscale);
static int cmp_abs(NumericVar *var1, NumericVar *var2);
-static int cmp_abs_common(const NumericDigit *var1digits, int var1ndigits,
- int var1weight,
- const NumericDigit *var2digits, int var2ndigits,
- int var2weight);
+static int cmp_abs_common(const NumericDigit *var1digits, int var1ndigits,
+ int var1weight,
+ const NumericDigit *var2digits, int var2ndigits,
+ int var2weight);
static void add_abs(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void sub_abs(NumericVar *var1, NumericVar *var2, NumericVar *result);
static void round_var(NumericVar *var, int rscale);
set_var_from_num(N, &vN);
/*
- * Sample stddev and variance are undefined when N <= 1;
- * population stddev is undefined when N == 0. Return NULL in
- * either case.
+ * Sample stddev and variance are undefined when N <= 1; population stddev
+ * is undefined when N == 0. Return NULL in either case.
*/
if (sample)
comp = &const_one;
mul_var(&vsumX, &vsumX, &vsumX, rscale); /* vsumX = sumX * sumX */
mul_var(&vN, &vsumX2, &vsumX2, rscale); /* vsumX2 = N * sumX2 */
- sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
+ sub_var(&vsumX2, &vsumX, &vsumX2); /* N * sumX2 - sumX * sumX */
if (cmp_var(&vsumX2, &const_zero) <= 0)
{
}
else
{
- mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
+ mul_var(&vN, &vNminus1, &vNminus1, 0); /* N * (N - 1) */
rscale = select_div_scale(&vsumX2, &vNminus1);
- div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
+ div_var(&vsumX2, &vNminus1, &vsumX, rscale, true); /* variance */
if (!variance)
- sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
+ sqrt_var(&vsumX, &vsumX, rscale); /* stddev */
res = make_result(&vsumX);
}
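For a quick sanity check of the formula evaluated here, (N * sumX2 - sumX * sumX) / (N * (N - 1)), this throwaway sketch works the same arithmetic with plain doubles; it is illustrative only, since the backend uses NumericVar arithmetic precisely to avoid floating-point roundoff:

#include <math.h>
#include <stdio.h>

int
main(void)
{
    double x[] = {1.0, 2.0, 3.0, 4.0};
    double N = 4.0;
    double sumX = 0.0;
    double sumX2 = 0.0;
    double variance;
    int    i;

    for (i = 0; i < 4; i++)
    {
        sumX += x[i];
        sumX2 += x[i] * x[i];
    }
    /* numerator N*sumX2 - sumX*sumX, denominator N*(N-1) for var_samp */
    variance = (N * sumX2 - sumX * sumX) / (N * (N - 1.0));
    printf("var_samp = %g, stddev_samp = %g\n", variance, sqrt(variance));
    /* prints var_samp = 1.66667, stddev_samp = 1.29099 */
    return 0;
}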
Datum
numeric_var_samp(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
true, true, &is_null);
Datum
numeric_stddev_samp(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
false, true, &is_null);
Datum
numeric_var_pop(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
true, false, &is_null);
Datum
numeric_stddev_pop(PG_FUNCTION_ARGS)
{
- Numeric res;
- bool is_null;
+ Numeric res;
+ bool is_null;
res = numeric_stddev_internal(PG_GETARG_ARRAYTYPE_P(0),
false, false, &is_null);
errmsg("numeric field overflow"),
errdetail("A field with precision %d, scale %d must round to an absolute value less than %s%d.",
precision, scale,
- /* Display 10^0 as 1 */
+ /* Display 10^0 as 1 */
maxdigits ? "10^" : "",
maxdigits ? maxdigits : 1
)));
/*
* cmp_var_common() -
*
- * Main routine of cmp_var(). This function can be used by both
- * NumericVar and Numeric.
+ * Main routine of cmp_var(). This function can be used by both
+ * NumericVar and Numeric.
*/
static int
cmp_var_common(const NumericDigit *var1digits, int var1ndigits,
/* ----------
* cmp_abs_common() -
*
- * Main routine of cmp_abs(). This function can be used by both
- * NumericVar and Numeric.
+ * Main routine of cmp_abs(). This function can be used by both
+ * NumericVar and Numeric.
* ----------
*/
static int
cmp_abs_common(const NumericDigit *var1digits, int var1ndigits, int var1weight,
- const NumericDigit *var2digits, int var2ndigits, int var2weight)
+ const NumericDigit *var2digits, int var2ndigits, int var2weight)
{
int i1 = 0;
int i2 = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.68 2006/03/05 15:58:43 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.69 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
oidvector *result;
/*
- * Normally one would call array_recv() using DirectFunctionCall3,
- * but that does not work since array_recv wants to cache some data
- * using fcinfo->flinfo->fn_extra. So we need to pass it our own
- * flinfo parameter.
+ * Normally one would call array_recv() using DirectFunctionCall3, but
+ * that does not work since array_recv wants to cache some data using
+ * fcinfo->flinfo->fn_extra. So we need to pass it our own flinfo
+ * parameter.
*/
InitFunctionCallInfoData(locfcinfo, fcinfo->flinfo, 3, NULL, NULL);
*
* Portions Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.36 2006/06/03 17:36:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.37 2006/10/04 00:29:59 momjian Exp $
*
*-----------------------------------------------------------------------
*/
static char lc_collate_envbuf[LC_ENV_BUFSIZE];
static char lc_ctype_envbuf[LC_ENV_BUFSIZE];
+
#ifdef LC_MESSAGES
static char lc_messages_envbuf[LC_ENV_BUFSIZE];
#endif
char *
pg_perm_setlocale(int category, const char *locale)
{
- char *result;
+ char *result;
const char *envvar;
- char *envbuf;
+ char *envbuf;
#ifndef WIN32
result = setlocale(category, locale);
#else
+
/*
- * On Windows, setlocale(LC_MESSAGES) does not work, so just assume
- * that the given value is good and set it in the environment variables.
- * We must ignore attempts to set to "", which means "keep using the
- * old environment value".
+ * On Windows, setlocale(LC_MESSAGES) does not work, so just assume that
+ * the given value is good and set it in the environment variables. We
+ * must ignore attempts to set to "", which means "keep using the old
+ * environment value".
*/
#ifdef LC_MESSAGES
if (category == LC_MESSAGES)
else
#endif
result = setlocale(category, locale);
-#endif /* WIN32 */
+#endif /* WIN32 */
if (result == NULL)
return result; /* fall out immediately on failure */
break;
}
- snprintf(envbuf, LC_ENV_BUFSIZE-1, "%s=%s", envvar, result);
+ snprintf(envbuf, LC_ENV_BUFSIZE - 1, "%s=%s", envvar, result);
#ifndef WIN32
if (putenv(envbuf))
return NULL;
#else
+
/*
* On Windows, we need to modify both the process environment and the
* cached version in msvcrt
/*
* LC_MESSAGES category does not exist everywhere, but accept it anyway
*
- * On Windows, we can't even check the value, so the non-doit case
- * is a no-op
+ * On Windows, we can't even check the value, so the non-doit case is a
+ * no-op
*/
#ifdef LC_MESSAGES
if (doit)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.33 2006/08/19 01:36:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pgstatfuncs.c,v 1.34 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pg_stat_get_last_vacuum_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
pg_stat_get_last_autovacuum_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
pg_stat_get_last_analyze_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
pg_stat_get_last_autoanalyze_time(PG_FUNCTION_ARGS)
{
Oid relid = PG_GETARG_OID(0);
- TimestampTz result;
+ TimestampTz result;
PgStat_StatTabEntry *tabentry;
if ((tabentry = pgstat_fetch_stat_tabentry(relid)) == NULL)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.65 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regexp.c,v 1.66 2006/10/04 00:29:59 momjian Exp $
*
* Alistair Crooks added the code for the regex caching
* agc - cached the regular expressions used - there's a good chance
* which is bizarre enough to require some explanation. "***:" is a
* director prefix to force the regex to be treated as an ARE regardless
* of the current regex_flavor setting. We need "^" and "$" to force
- * the pattern to match the entire input string as per SQL99 spec. The
+ * the pattern to match the entire input string as per SQL99 spec. The
* "(?:" and ")" are a non-capturing set of parens; we have to have
* parens in case the string contains "|", else the "^" and "$" will
* be bound into the first and last alternatives which is not what we
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.88 2006/08/27 21:41:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.89 2006/10/04 00:29:59 momjian Exp $
*
* ----------
*/
/*
* We should not even consider checking the row if it is no longer valid,
* since it was either deleted (so the deferred check should be skipped)
- * or updated (in which case only the latest version of the row should
- * be checked). Test its liveness with HeapTupleSatisfiesItself.
+ * or updated (in which case only the latest version of the row should be
+ * checked). Test its liveness with HeapTupleSatisfiesItself.
*
* NOTE: The normal coding rule is that one must acquire the buffer
- * content lock to call HeapTupleSatisfiesFOO. We can skip that here
+ * content lock to call HeapTupleSatisfiesFOO. We can skip that here
* because we know that AfterTriggerExecute just fetched the tuple
- * successfully, so there cannot be a VACUUM compaction in progress
- * on the page (either heap_fetch would have waited for the VACUUM,
- * or the VACUUM's LockBufferForCleanup would be waiting for us to drop
- * pin). And since this is a row inserted by our open transaction,
- * no one else can be entitled to change its xmin/xmax.
+ * successfully, so there cannot be a VACUUM compaction in progress on the
+ * page (either heap_fetch would have waited for the VACUUM, or the
+ * VACUUM's LockBufferForCleanup would be waiting for us to drop pin).
+ * And since this is a row inserted by our open transaction, no one else
+ * can be entitled to change its xmin/xmax.
*/
Assert(new_row_buf != InvalidBuffer);
if (!HeapTupleSatisfiesItself(new_row->t_data, new_row_buf))
* ruleutils.c - Functions to convert stored expressions/querytrees
* back to source text
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.233 2006/10/01 17:23:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.234 2006/10/04 00:29:59 momjian Exp $
**********************************************************************/
#include "postgres.h"
static void get_basic_select_query(Query *query, deparse_context *context,
TupleDesc resultDesc);
static void get_target_list(List *targetList, deparse_context *context,
- TupleDesc resultDesc);
+ TupleDesc resultDesc);
static void get_setop_query(Node *setOp, Query *query,
deparse_context *context,
TupleDesc resultDesc);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
StringInfo buf);
static Node *processIndirection(Node *node, deparse_context *context,
- bool printit);
+ bool printit);
static void printSubscripts(ArrayRef *aref, deparse_context *context);
static char *generate_relation_name(Oid relid);
static char *generate_function_name(Oid funcid, int nargs, Oid *argtypes);
{
if (i > 0)
appendStringInfo(&buf, ", ");
+
/*
* We form the string literal according to the prevailing setting
- * of standard_conforming_strings; we never use E''.
- * User is responsible for making sure result is used correctly.
+ * of standard_conforming_strings; we never use E''. User is
+ * responsible for making sure result is used correctly.
*/
appendStringInfoChar(&buf, '\'');
while (*p)
if (fullCommand && OidIsValid(conForm->conrelid))
{
- char *options = flatten_reloptions(conForm->conrelid);
+ char *options = flatten_reloptions(conForm->conrelid);
if (options)
{
RangeTblEntry *rte = makeNode(RangeTblEntry);
/*
- * We create an RTE_SPECIAL RangeTblEntry, and store the subplan in
- * its funcexpr field. RTE_SPECIAL nodes shouldn't appear in
- * deparse contexts otherwise.
+ * We create an RTE_SPECIAL RangeTblEntry, and store the subplan in its
+ * funcexpr field. RTE_SPECIAL nodes shouldn't appear in deparse contexts
+ * otherwise.
*/
rte->rtekind = RTE_SPECIAL;
rte->relid = InvalidOid;
appendStringInfoChar(buf, '(');
foreach(lc, sublist)
{
- Node *col = (Node *) lfirst(lc);
+ Node *col = (Node *) lfirst(lc);
if (first_col)
first_col = false;
}
/*
- * If the query looks like SELECT * FROM (VALUES ...), then print just
- * the VALUES part. This reverses what transformValuesClause() did at
- * parse time. If the jointree contains just a single VALUES RTE,
- * we assume this case applies (without looking at the targetlist...)
+ * If the query looks like SELECT * FROM (VALUES ...), then print just the
+ * VALUES part. This reverses what transformValuesClause() did at parse
+ * time. If the jointree contains just a single VALUES RTE, we assume
+ * this case applies (without looking at the targetlist...)
*/
if (list_length(query->jointree->fromlist) == 1)
{
colno++;
/*
- * We special-case Var nodes rather than using get_rule_expr.
- * This is needed because get_rule_expr will display a whole-row Var
- * as "foo.*", which is the preferred notation in most contexts, but
- * at the top level of a SELECT list it's not right (the parser will
+ * We special-case Var nodes rather than using get_rule_expr. This is
+ * needed because get_rule_expr will display a whole-row Var as
+ * "foo.*", which is the preferred notation in most contexts, but at
+ * the top level of a SELECT list it's not right (the parser will
* expand that notation into multiple columns, yielding behavior
* different from a whole-row Var). We want just "foo", instead.
*/
List *strippedexprs;
/*
- * If it's an INSERT ... SELECT or VALUES (...), (...), ...
- * there will be a single RTE for the SELECT or VALUES.
+ * If it's an INSERT ... SELECT or VALUES (...), (...), ... there will be
+ * a single RTE for the SELECT or VALUES.
*/
foreach(l, query->rtable)
{
elog(ERROR, "too many subquery RTEs in INSERT");
select_rte = rte;
}
-
+
if (rte->rtekind == RTE_VALUES)
{
if (values_rte)
generate_relation_name(rte->relid));
/*
- * Add the insert-column-names list. To handle indirection properly,
- * we need to look for indirection nodes in the top targetlist (if it's
+ * Add the insert-column-names list. To handle indirection properly, we
+ * need to look for indirection nodes in the top targetlist (if it's
* INSERT ... SELECT or INSERT ... single VALUES), or in the first
- * expression list of the VALUES RTE (if it's INSERT ... multi VALUES).
- * We assume that all the expression lists will have similar indirection
- * in the latter case.
+ * expression list of the VALUES RTE (if it's INSERT ... multi VALUES). We
+ * assume that all the expression lists will have similar indirection in
+ * the latter case.
*/
if (values_rte)
values_cell = list_head((List *) linitial(values_rte->values_lists));
/*
* Try to find the relevant RTE in this rtable. In a plan tree, it's
- * likely that varno is OUTER, INNER, or 0, in which case we try to
- * use varnoold instead. If the Var references an expression computed
- * by a subplan, varnoold will be 0, and we fall back to looking at the
- * special subplan RTEs.
+ * likely that varno is OUTER, INNER, or 0, in which case we try to use
+ * varnoold instead. If the Var references an expression computed by a
+ * subplan, varnoold will be 0, and we fall back to looking at the special
+ * subplan RTEs.
*/
if (var->varno >= 1 && var->varno <= list_length(dpns->rtable))
rte = rt_fetch(var->varno, dpns->rtable);
/*
* This case occurs during EXPLAIN when we are looking at a
* deparse context node set up by deparse_context_for_subplan().
- * If the subplan tlist provides a name, use it, but usually
- * we'll end up with "?columnN?".
+ * If the subplan tlist provides a name, use it, but usually we'll
+ * end up with "?columnN?".
*/
- List *tlist = ((Plan *) rte->funcexpr)->targetlist;
+ List *tlist = ((Plan *) rte->funcexpr)->targetlist;
TargetEntry *tle = get_tle_by_resno(tlist, attnum);
if (tle && tle->resname)
}
else
{
- char buf[32];
+ char buf[32];
snprintf(buf, sizeof(buf), "?column%d?", attnum);
*attname = pstrdup(buf);
/*
* This case should not occur: a column of a table or values list
- * shouldn't have type RECORD. Fall through and fail
- * (most likely) at the bottom.
+ * shouldn't have type RECORD. Fall through and fail (most
+ * likely) at the bottom.
*/
break;
case RTE_SUBQUERY:
* that's not a Var, and then pass it to
* get_expr_result_type().
*/
- Plan *subplan = (Plan *) rte->funcexpr;
+ Plan *subplan = (Plan *) rte->funcexpr;
for (;;)
{
if (get_expr_result_type(arg, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
tupdesc = lookup_rowtype_tupdesc_copy(exprType(arg),
- exprTypmod(arg));
+ exprTypmod(arg));
Assert(tupdesc);
/* Got the tupdesc, so we can extract the field name */
Assert(fno >= 1 && fno <= tupdesc->natts);
if (caseexpr->arg)
{
/*
- * The parser should have produced WHEN clauses of
- * the form "CaseTestExpr = RHS"; we want to show
- * just the RHS. If the user wrote something silly
- * like "CASE boolexpr WHEN TRUE THEN ...", then
- * the optimizer's simplify_boolean_equality() may
- * have reduced this to just "CaseTestExpr" or
- * "NOT CaseTestExpr", for which we have to show
- * "TRUE" or "FALSE". Also, depending on context
- * the original CaseTestExpr might have been reduced
- * to a Const (but we won't see "WHEN Const").
+ * The parser should have produced WHEN clauses of the
+ * form "CaseTestExpr = RHS"; we want to show just the
+ * RHS. If the user wrote something silly like "CASE
+ * boolexpr WHEN TRUE THEN ...", then the optimizer's
+ * simplify_boolean_equality() may have reduced this
+ * to just "CaseTestExpr" or "NOT CaseTestExpr", for
+ * which we have to show "TRUE" or "FALSE". Also,
+ * depending on context the original CaseTestExpr
+ * might have been reduced to a Const (but we won't
+ * see "WHEN Const").
*/
if (IsA(w, OpExpr))
{
get_rule_expr(e, context, true);
sep = ", ";
}
+
/*
- * We assume that the name of the first-column operator
- * will do for all the rest too. This is definitely
- * open to failure, eg if some but not all operators
- * were renamed since the construct was parsed, but there
- * seems no way to be perfect.
+ * We assume that the name of the first-column operator will
+ * do for all the rest too. This is definitely open to
+ * failure, eg if some but not all operators were renamed
+ * since the construct was parsed, but there seems no way to
+ * be perfect.
*/
appendStringInfo(buf, ") %s ROW(",
- generate_operator_name(linitial_oid(rcexpr->opnos),
- exprType(linitial(rcexpr->largs)),
- exprType(linitial(rcexpr->rargs))));
+ generate_operator_name(linitial_oid(rcexpr->opnos),
+ exprType(linitial(rcexpr->largs)),
+ exprType(linitial(rcexpr->rargs))));
sep = "";
foreach(arg, rcexpr->rargs)
{
}
appendStringInfo(buf, "%s(%s",
- generate_function_name(aggref->aggfnoid, nargs, argtypes),
+ generate_function_name(aggref->aggfnoid, nargs, argtypes),
aggref->aggdistinct ? "DISTINCT " : "");
/* aggstar can be set only in zero-argument aggregates */
if (aggref->aggstar)
/*
* We form the string literal according to the prevailing setting
- * of standard_conforming_strings; we never use E''.
- * User is responsible for making sure result is used correctly.
+ * of standard_conforming_strings; we never use E''. User is
+ * responsible for making sure result is used correctly.
*/
appendStringInfoChar(buf, '\'');
for (valptr = extval; *valptr; valptr++)
appendStringInfoChar(buf, '(');
/*
- * Note that we print the name of only the first operator, when there
- * are multiple combining operators. This is an approximation that
- * could go wrong in various scenarios (operators in different schemas,
- * renamed operators, etc) but there is not a whole lot we can do about
- * it, since the syntax allows only one operator to be shown.
+ * Note that we print the name of only the first operator, when there are
+ * multiple combining operators. This is an approximation that could go
+ * wrong in various scenarios (operators in different schemas, renamed
+ * operators, etc) but there is not a whole lot we can do about it, since
+ * the syntax allows only one operator to be shown.
*/
if (sublink->testexpr)
{
if (IsA(sublink->testexpr, OpExpr))
{
/* single combining operator */
- OpExpr *opexpr = (OpExpr *) sublink->testexpr;
+ OpExpr *opexpr = (OpExpr *) sublink->testexpr;
get_rule_expr(linitial(opexpr->args), context, true);
opname = generate_operator_name(opexpr->opno,
sep = "";
foreach(l, ((BoolExpr *) sublink->testexpr)->args)
{
- OpExpr *opexpr = (OpExpr *) lfirst(l);
+ OpExpr *opexpr = (OpExpr *) lfirst(l);
Assert(IsA(opexpr, OpExpr));
appendStringInfoString(buf, sep);
get_rule_expr((Node *) rcexpr->largs, context, true);
opname = generate_operator_name(linitial_oid(rcexpr->opnos),
exprType(linitial(rcexpr->largs)),
- exprType(linitial(rcexpr->rargs)));
+ exprType(linitial(rcexpr->rargs)));
appendStringInfoChar(buf, ')');
}
else
Anum_pg_class_reloptions, &isnull);
if (!isnull)
{
- Datum sep,
- txt;
+ Datum sep,
+ txt;
/*
* We want to use array_to_text(reloptions, ', ') --- but
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.213 2006/09/20 19:50:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.214 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static double ineq_histogram_selectivity(VariableStatData *vardata,
- FmgrInfo *opproc, bool isgt,
- Datum constval, Oid consttype);
+ FmgrInfo *opproc, bool isgt,
+ Datum constval, Oid consttype);
static bool convert_to_scalar(Datum value, Oid valuetypid, double *scaledvalue,
Datum lobound, Datum hibound, Oid boundstypid,
double *scaledlobound, double *scaledhibound);
static bool get_variable_maximum(PlannerInfo *root, VariableStatData *vardata,
Oid sortop, Datum *max);
static Selectivity prefix_selectivity(VariableStatData *vardata,
- Oid opclass, Const *prefixcon);
+ Oid opclass, Const *prefixcon);
static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
static Datum string_to_datum(const char *str, Oid datatype);
static Const *string_to_const(const char *str, Oid datatype);
else
{
/*
- * No ANALYZE stats available, so make a guess using estimated
- * number of distinct values and assuming they are equally common.
- * (The guess is unlikely to be very good, but we do know a few
- * special cases.)
+ * No ANALYZE stats available, so make a guess using estimated number
+ * of distinct values and assuming they are equally common. (The guess
+ * is unlikely to be very good, but we do know a few special cases.)
*/
selec = 1.0 / get_variable_numdistinct(&vardata);
}
*
* Note that the result disregards both the most-common-values (if any) and
* null entries. The caller is expected to combine this result with
- * statistics for those portions of the column population. It may also be
+ * statistics for those portions of the column population. It may also be
* prudent to clamp the result range, ie, disbelieve exact 0 or 1 outputs.
*/
double
if (nvalues > 1)
{
/*
- * Use binary search to find proper location, ie, the first
- * slot at which the comparison fails. (If the given operator
- * isn't actually sort-compatible with the histogram, you'll
- * get garbage results ... but probably not any more garbage-y
- * than you would from the old linear search.)
+ * Use binary search to find proper location, ie, the first slot
+ * at which the comparison fails. (If the given operator isn't
+ * actually sort-compatible with the histogram, you'll get garbage
+ * results ... but probably not any more garbage-y than you would
+ * from the old linear search.)
*/
- double histfrac;
- int lobound = 0; /* first possible slot to search */
- int hibound = nvalues; /* last+1 slot to search */
+ double histfrac;
+ int lobound = 0; /* first possible slot to search */
+ int hibound = nvalues; /* last+1 slot to search */
while (lobound < hibound)
{
- int probe = (lobound + hibound) / 2;
- bool ltcmp;
+ int probe = (lobound + hibound) / 2;
+ bool ltcmp;
ltcmp = DatumGetBool(FunctionCall2(opproc,
values[probe],
binfrac = (val - low) / (high - low);
/*
- * Watch out for the possibility that we got a NaN
- * or Infinity from the division. This can happen
- * despite the previous checks, if for example
- * "low" is -Infinity.
+ * Watch out for the possibility that we got a NaN or
+ * Infinity from the division. This can happen
+ * despite the previous checks, if for example "low"
+ * is -Infinity.
*/
if (isnan(binfrac) ||
binfrac < 0.0 || binfrac > 1.0)
else
{
/*
- * Ideally we'd produce an error here, on the grounds
- * that the given operator shouldn't have scalarXXsel
- * registered as its selectivity func unless we can
- * deal with its operand types. But currently, all
- * manner of stuff is invoking scalarXXsel, so give a
- * default estimate until that can be fixed.
+ * Ideally we'd produce an error here, on the grounds that
+ * the given operator shouldn't have scalarXXsel
+ * registered as its selectivity func unless we can deal
+ * with its operand types. But currently, all manner of
+ * stuff is invoking scalarXXsel, so give a default
+ * estimate until that can be fixed.
*/
binfrac = 0.5;
}
/*
* Now, compute the overall selectivity across the values
- * represented by the histogram. We have i-1 full bins
- * and binfrac partial bin below the constant.
+ * represented by the histogram. We have i-1 full bins and
+ * binfrac partial bin below the constant.
*/
histfrac = (double) (i - 1) + binfrac;
histfrac /= (double) (nvalues - 1);
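A much-simplified standalone sketch of the same binary search plus linear interpolation, assuming a plain array of doubles and "<" as the operator; the real code works on Datums through an arbitrary comparison function and also guards against NaN/Infinity as noted above:

#include <stdio.h>

/* Fraction of the histogram-covered population lying below "constval",
 * for a histogram given as nvalues ascending bin boundaries. */
static double
hist_fraction_below(const double *values, int nvalues, double constval)
{
    int     lobound = 0;            /* first possible slot to search */
    int     hibound = nvalues;      /* last+1 slot to search */
    int     i;
    double  low,
            high,
            binfrac;

    while (lobound < hibound)
    {
        int probe = (lobound + hibound) / 2;

        if (values[probe] < constval)
            lobound = probe + 1;    /* comparison still succeeds */
        else
            hibound = probe;
    }
    i = lobound;                    /* first slot where the comparison fails */
    if (i <= 0)
        return 0.0;                 /* constant is below the whole histogram */
    if (i >= nvalues)
        return 1.0;                 /* constant is above the whole histogram */

    low = values[i - 1];
    high = values[i];
    binfrac = (high > low) ? (constval - low) / (high - low) : 0.5;

    /* i-1 full bins plus one partial bin, out of nvalues-1 bins in all */
    return ((double) (i - 1) + binfrac) / (double) (nvalues - 1);
}

int
main(void)
{
    double hist[] = {0.0, 10.0, 20.0, 30.0, 40.0};

    printf("%g\n", hist_fraction_below(hist, 5, 15.0));    /* prints 0.375 */
    return 0;
}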
/*
* If we have most-common-values info, add up the fractions of the MCV
* entries that satisfy MCV OP PATTERN. These fractions contribute
- * directly to the result selectivity. Also add up the total fraction
+ * directly to the result selectivity. Also add up the total fraction
* represented by MCV entries.
*/
mcv_selec = mcv_selectivity(&vardata, &opproc, constval, true,
RegProcedure oprsel;
FmgrInfo oprselproc;
Datum selarg4;
- Selectivity s1;
+ Selectivity s1;
/*
- * First, look up the underlying operator's selectivity estimator.
- * Punt if it hasn't got one.
+ * First, look up the underlying operator's selectivity estimator. Punt if
+ * it hasn't got one.
*/
if (is_join_clause)
{
* We consider three cases:
*
* 1. rightop is an Array constant: deconstruct the array, apply the
- * operator's selectivity function for each array element, and merge
- * the results in the same way that clausesel.c does for AND/OR
- * combinations.
+ * operator's selectivity function for each array element, and merge the
+ * results in the same way that clausesel.c does for AND/OR combinations.
*
* 2. rightop is an ARRAY[] construct: apply the operator's selectivity
* function for each element of the ARRAY[] construct, and merge.
s1 = useOr ? 0.0 : 1.0;
for (i = 0; i < num_elems; i++)
{
- List *args;
+ List *args;
Selectivity s2;
args = list_make2(leftop,
s1 = useOr ? 0.0 : 1.0;
foreach(l, arrayexpr->elements)
{
- List *args;
+ List *args;
Selectivity s2;
args = list_make2(leftop, lfirst(l));
else
{
CaseTestExpr *dummyexpr;
- List *args;
+ List *args;
Selectivity s2;
- int i;
+ int i;
/*
* We need a dummy rightop to pass to the operator selectivity
- * routine. It can be pretty much anything that doesn't look like
- * a constant; CaseTestExpr is a convenient choice.
+ * routine. It can be pretty much anything that doesn't look like a
+ * constant; CaseTestExpr is a convenient choice.
*/
dummyexpr = makeNode(CaseTestExpr);
dummyexpr->typeId = get_element_type(exprType(rightop));
PointerGetDatum(args),
selarg4));
s1 = useOr ? 0.0 : 1.0;
+
/*
- * Arbitrarily assume 10 elements in the eventual array value
- * (see also estimate_array_length)
+ * Arbitrarily assume 10 elements in the eventual array value (see
+ * also estimate_array_length)
*/
for (i = 0; i < 10; i++)
{
* == as you'd expect. Can't any of these people program their way
* out of a paper bag?
*/
-#if _MSC_VER == 1400 /* VS.Net 2005 */
- /* http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx?FeedbackID=99694 */
+#if _MSC_VER == 1400 /* VS.Net 2005 */
+
+ /*
+ * http://connect.microsoft.com/VisualStudio/feedback/ViewFeedback.aspx
+ * ?FeedbackID=99694
+ */
{
- char x[1];
+ char x[1];
+
xfrmlen = strxfrm(x, val, 0);
}
#else
- xfrmlen = strxfrm(NULL, val, 0);
+ xfrmlen = strxfrm(NULL, val, 0);
#endif
xfrmstr = (char *) palloc(xfrmlen + 1);
xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1);
if (rte->inh)
{
/*
- * XXX This means the Var represents a column of an append relation.
- * Later add code to look at the member relations and try to derive
- * some kind of combined statistics?
+ * XXX This means the Var represents a column of an append
+ * relation. Later add code to look at the member relations and
+ * try to derive some kind of combined statistics?
*/
}
else if (rte->rtekind == RTE_RELATION)
/*
* Merge the two selectivities in the same way as for a range query
- * (see clauselist_selectivity()). Note that we don't need to worry
+ * (see clauselist_selectivity()). Note that we don't need to worry
* about double-exclusion of nulls, since ineq_histogram_selectivity
* doesn't count those anyway.
*/
/*
* A zero or negative prefixsel should be converted into a small
- * positive value; we probably are dealing with a very tight range
- * and got a bogus result due to roundoff errors.
+ * positive value; we probably are dealing with a very tight range and
+ * got a bogus result due to roundoff errors.
*/
if (prefixsel <= 0.0)
prefixsel = 1.0e-10;
selectivityQuals = indexQuals;
/*
- * Check for ScalarArrayOpExpr index quals, and estimate the number
- * of index scans that will be performed.
+ * Check for ScalarArrayOpExpr index quals, and estimate the number of
+ * index scans that will be performed.
*/
num_sa_scans = 1;
foreach(l, indexQuals)
if (IsA(rinfo->clause, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) rinfo->clause;
- int alength = estimate_array_length(lsecond(saop->args));
+ int alength = estimate_array_length(lsecond(saop->args));
if (alength > 1)
num_sa_scans *= alength;
numIndexTuples = rint(numIndexTuples / num_sa_scans);
/*
- * We can bound the number of tuples by the index size in any case.
- * Also, always estimate at least one tuple is touched, even when
+ * We can bound the number of tuples by the index size in any case. Also,
+ * always estimate at least one tuple is touched, even when
* indexSelectivity estimate is tiny.
*/
if (numIndexTuples > index->tuples)
/*
* Estimate the number of index pages that will be retrieved.
*
- * We use the simplistic method of taking a pro-rata fraction of the
- * total number of index pages. In effect, this counts only leaf pages
- * and not any overhead such as index metapage or upper tree levels.
- * In practice this seems a better approximation than charging for
- * access to the upper levels, perhaps because those tend to stay in
- * cache under load.
+ * We use the simplistic method of taking a pro-rata fraction of the total
+ * number of index pages. In effect, this counts only leaf pages and not
+ * any overhead such as index metapage or upper tree levels. In practice
+ * this seems a better approximation than charging for access to the upper
+ * levels, perhaps because those tend to stay in cache under load.
*/
if (index->pages > 1 && index->tuples > 1)
numIndexPages = ceil(numIndexTuples * index->pages / index->tuples);
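For example, an estimate of 500 index tuples fetched from an index holding 10000 tuples in 100 pages charges ceil(500 * 100 / 10000) = 5 leaf pages.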
/*
* Now compute the disk access costs.
*
- * The above calculations are all per-index-scan. However, if we are
- * in a nestloop inner scan, we can expect the scan to be repeated (with
+ * The above calculations are all per-index-scan. However, if we are in a
+ * nestloop inner scan, we can expect the scan to be repeated (with
* different search keys) for each row of the outer relation. Likewise,
- * ScalarArrayOpExpr quals result in multiple index scans. This
- * creates the potential for cache effects to reduce the number of
- * disk page fetches needed. We want to estimate the average per-scan
- * I/O cost in the presence of caching.
+ * ScalarArrayOpExpr quals result in multiple index scans. This creates
+ * the potential for cache effects to reduce the number of disk page
+ * fetches needed. We want to estimate the average per-scan I/O cost in
+ * the presence of caching.
*
* We use the Mackert-Lohman formula (see costsize.c for details) to
* estimate the total number of page fetches that occur. While this
* wasn't what it was designed for, it seems a reasonable model anyway.
- * Note that we are counting pages not tuples anymore, so we take
- * N = T = index size, as if there were one "tuple" per page.
+ * Note that we are counting pages not tuples anymore, so we take N = T =
+ * index size, as if there were one "tuple" per page.
*/
if (outer_rel != NULL && outer_rel->rows > 1)
{
root);
/*
- * Now compute the total disk access cost, and then report a
- * pro-rated share for each outer scan. (Don't pro-rate for
- * ScalarArrayOpExpr, since that's internal to the indexscan.)
+ * Now compute the total disk access cost, and then report a pro-rated
+ * share for each outer scan. (Don't pro-rate for ScalarArrayOpExpr,
+ * since that's internal to the indexscan.)
*/
*indexTotalCost = (pages_fetched * random_page_cost) / num_outer_scans;
}
}
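As a concrete instance of the pro-rating above: if the Mackert-Lohman estimate comes out at 400 pages fetched across 20 outer scans, each scan is charged 400 * random_page_cost / 20, i.e. 20 random_page_cost worth of I/O.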
/*
- * A difficulty with the leaf-pages-only cost approach is that for
- * small selectivities (eg, single index tuple fetched) all indexes
- * will look equally attractive because we will estimate exactly 1
- * leaf page to be fetched. All else being equal, we should prefer
- * physically smaller indexes over larger ones. (An index might be
- * smaller because it is partial or because it contains fewer columns;
- * presumably the other columns in the larger index aren't useful to
- * the query, or the larger index would have better selectivity.)
+ * A difficulty with the leaf-pages-only cost approach is that for small
+ * selectivities (eg, single index tuple fetched) all indexes will look
+ * equally attractive because we will estimate exactly 1 leaf page to be
+ * fetched. All else being equal, we should prefer physically smaller
+ * indexes over larger ones. (An index might be smaller because it is
+ * partial or because it contains fewer columns; presumably the other
+ * columns in the larger index aren't useful to the query, or the larger
+ * index would have better selectivity.)
*
* We can deal with this by adding a very small "fudge factor" that
* depends on the index size. The fudge factor used here is one
- * random_page_cost per 100000 index pages, which should be small
- * enough to not alter index-vs-seqscan decisions, but will prevent
- * indexes of different sizes from looking exactly equally attractive.
+ * random_page_cost per 100000 index pages, which should be small enough
+ * to not alter index-vs-seqscan decisions, but will prevent indexes of
+ * different sizes from looking exactly equally attractive.
*/
*indexTotalCost += index->pages * random_page_cost / 100000.0;
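For instance, a 10000-page index is charged an extra 10000 * random_page_cost / 100000 = 0.1 random_page_cost: enough to order otherwise identical-looking candidates, but far too small to swing an index-vs-seqscan decision.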
* For a RowCompareExpr, we consider only the first column, just as
* rowcomparesel() does.
*
- * If there's a ScalarArrayOpExpr in the quals, we'll actually perform
- * N index scans not one, but the ScalarArrayOpExpr's operator can be
+ * If there's a ScalarArrayOpExpr in the quals, we'll actually perform N
+ * index scans not one, but the ScalarArrayOpExpr's operator can be
* considered to act the same as it normally does.
*/
indexBoundQuals = NIL;
* ordering, but don't negate it entirely. Before 8.0 we divided the
* correlation by the number of columns, but that seems too strong.)
*
- * We can skip all this if we found a ScalarArrayOpExpr, because then
- * the call must be for a bitmap index scan, and the caller isn't going
- * to care what the index correlation is.
+ * We can skip all this if we found a ScalarArrayOpExpr, because then the
+ * call must be for a bitmap index scan, and the caller isn't going to
+ * care what the index correlation is.
*/
if (found_saop)
PG_RETURN_VOID();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.55 2006/08/25 04:06:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.56 2006/10/04 00:29:59 momjian Exp $
*
* NOTES
* input routine largely stolen from boxin().
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) == 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) == 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) != 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) != 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) < 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) < 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) <= 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) <= 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) > 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) > 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_BOOL(ItemPointerCompare(arg1,arg2) >= 0);
+ PG_RETURN_BOOL(ItemPointerCompare(arg1, arg2) >= 0);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) >= 0 ? arg1 : arg2);
+ PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1, arg2) >= 0 ? arg1 : arg2);
}
Datum
ItemPointer arg1 = PG_GETARG_ITEMPOINTER(0);
ItemPointer arg2 = PG_GETARG_ITEMPOINTER(1);
- PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1,arg2) <= 0 ? arg1 : arg2);
+ PG_RETURN_ITEMPOINTER(ItemPointerCompare(arg1, arg2) <= 0 ? arg1 : arg2);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.167 2006/09/05 01:13:39 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.168 2006/10/04 00:29:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef HAVE_INT64_TIMESTAMP
result = (time_t) (t / USECS_PER_SEC +
- ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#else
result = (time_t) (t +
- ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
+ ((POSTGRES_EPOCH_JDATE - UNIX_EPOCH_JDATE) * SECS_PER_DAY));
#endif
return result;
* test=> SET timezone = 'EST5EDT';
* test=> SELECT
* test-> ('2005-10-30 13:22:00-05'::timestamptz -
- * test(> '2005-10-29 13:22:00-04'::timestamptz);
+ * test(> '2005-10-29 13:22:00-04'::timestamptz);
* ?column?
* ----------------
* 1 day 01:00:00
* test-> ('2005-10-29 13:22:00-04'::timestamptz +
* test(> ('2005-10-30 13:22:00-05'::timestamptz -
* test(> '2005-10-29 13:22:00-04'::timestamptz)) at time zone 'EST';
- * timezone
+ * timezone
* --------------------
* 2005-10-30 14:22:00
* (1 row)
}
/*
- * interval_justify_interval()
+ * interval_justify_interval()
*
- * Adjust interval so 'month', 'day', and 'time' portions are within
- * customary bounds. Specifically:
+ * Adjust interval so 'month', 'day', and 'time' portions are within
+ * customary bounds. Specifically:
*
- * 0 <= abs(time) < 24 hours
- * 0 <= abs(day) < 30 days
+ * 0 <= abs(time) < 24 hours
+ * 0 <= abs(day) < 30 days
*
- * Also, the sign bit on all three fields is made equal, so either
- * all three fields are negative or all are positive.
+ * Also, the sign bit on all three fields is made equal, so either
+ * all three fields are negative or all are positive.
*/
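(For example, justify_interval('1 mon -1 hour'::interval) comes out as '29 days 23:00:00': the month is broken into 30 days so the signs can agree, and one of those days is then broken into 24 hours to absorb the negative hour.)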
Datum
interval_justify_interval(PG_FUNCTION_ARGS)
{
Interval *span = PG_GETARG_INTERVAL_P(0);
Interval *result;
-
+
#ifdef HAVE_INT64_TIMESTAMP
int64 wholeday;
#else
result->month--;
}
else if (result->month < 0 &&
- (result->day > 0 || (result->day == 0 && result->time > 0)))
+ (result->day > 0 || (result->day == 0 && result->time > 0)))
{
result->day -= DAYS_PER_MONTH;
result->month++;
result->day--;
}
else if (result->day < 0 && result->time > 0)
- {
+ {
#ifdef HAVE_INT64_TIMESTAMP
result->time -= USECS_PER_DAY;
#else
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder_days, sec_remainder;
- int32 orig_month = span->month, orig_day = span->day;
+ double month_remainder_days,
+ sec_remainder;
+ int32 orig_month = span->month,
+ orig_day = span->day;
Interval *result;
result = (Interval *) palloc(sizeof(Interval));
*/
/*
- * Fractional months full days into days.
+ * Convert fractional months' full days into days.
*
- * Floating point calculation are inherently inprecise, so these
- * calculations are crafted to produce the most reliable result
- * possible. TSROUND() is needed to more accurately produce whole
- * numbers where appropriate.
+ * Floating point calculations are inherently imprecise, so these
+ * calculations are crafted to produce the most reliable result possible.
+ * TSROUND() is needed to more accurately produce whole numbers where
+ * appropriate.
*/
month_remainder_days = (orig_month * factor - result->month) * DAYS_PER_MONTH;
month_remainder_days = TSROUND(month_remainder_days);
sec_remainder = (orig_day * factor - result->day +
- month_remainder_days - (int)month_remainder_days) * SECS_PER_DAY;
+ month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY;
sec_remainder = TSROUND(sec_remainder);
/*
- * Might have 24:00:00 hours due to rounding, or >24 hours because of
- * time cascade from months and days. It might still be >24 if the
- * combination of cascade and the seconds factor operation itself.
+ * Might have 24:00:00 hours due to rounding, or >24 hours because of time
+ * cascade from months and days. It might still be >24 because of the
+ * combination of the cascade and the seconds factor operation itself.
*/
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int)(sec_remainder / SECS_PER_DAY);
- sec_remainder -= (int)(sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
+ result->day += (int) (sec_remainder / SECS_PER_DAY);
+ sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
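Worked through these formulas, '1 mon'::interval * 0.5 leaves result->month = 0 with month_remainder_days = 15 and sec_remainder = 0, so the cascade below turns the result into '15 days'.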
/* cascade units down */
{
Interval *span = PG_GETARG_INTERVAL_P(0);
float8 factor = PG_GETARG_FLOAT8(1);
- double month_remainder_days, sec_remainder;
- int32 orig_month = span->month, orig_day = span->day;
+ double month_remainder_days,
+ sec_remainder;
+ int32 orig_month = span->month,
+ orig_day = span->day;
Interval *result;
-
+
result = (Interval *) palloc(sizeof(Interval));
if (factor == 0.0)
result->day = (int32) (span->day / factor);
/*
- * Fractional months full days into days. See comment in
- * interval_mul().
+ * Convert fractional months' full days into days. See comment in interval_mul().
*/
month_remainder_days = (orig_month / factor - result->month) * DAYS_PER_MONTH;
month_remainder_days = TSROUND(month_remainder_days);
sec_remainder = (orig_day / factor - result->day +
- month_remainder_days - (int)month_remainder_days) * SECS_PER_DAY;
+ month_remainder_days - (int) month_remainder_days) * SECS_PER_DAY;
sec_remainder = TSROUND(sec_remainder);
if (Abs(sec_remainder) >= SECS_PER_DAY)
{
- result->day += (int)(sec_remainder / SECS_PER_DAY);
- sec_remainder -= (int)(sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
+ result->day += (int) (sec_remainder / SECS_PER_DAY);
+ sec_remainder -= (int) (sec_remainder / SECS_PER_DAY) * SECS_PER_DAY;
}
/* cascade units down */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.118 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.119 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
len2 = bcTruelen(arg2);
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = false;
len2 = bcTruelen(arg2);
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (len1 != len2)
result = true;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.150 2006/07/14 14:52:24 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.151 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool result;
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (VARSIZE(arg1) != VARSIZE(arg2))
result = false;
bool result;
/*
- * Since we only care about equality or not-equality, we can avoid all
- * the expense of strcoll() here, and just do bitwise comparison.
+ * Since we only care about equality or not-equality, we can avoid all the
+ * expense of strcoll() here, and just do bitwise comparison.
*/
if (VARSIZE(arg1) != VARSIZE(arg2))
result = true;
text *ret_text;
regex_t *re = (regex_t *) regexp;
int src_text_len = VARSIZE(src_text) - VARHDRSZ;
- StringInfoData buf;
+ StringInfoData buf;
regmatch_t pmatch[REGEXP_REPLACE_BACKREF_CNT];
pg_wchar *data;
size_t data_len;
for (search_start = data_pos = 0; search_start <= data_len;)
{
- int regexec_result;
+ int regexec_result;
regexec_result = pg_regexec(re,
data,
int typlen;
bool typbyval;
char typalign;
- StringInfoData buf;
+ StringInfoData buf;
bool printed = false;
char *p;
bits8 *bitmap;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.132 2006/07/31 20:09:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.133 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* nbuckets is the number of hash buckets to use in this catcache.
- * Currently we just use a hard-wired estimate of an appropriate size
- * for each cache; maybe later make them dynamically resizable?
+ * Currently we just use a hard-wired estimate of an appropriate size for
+ * each cache; maybe later make them dynamically resizable?
*
* nbuckets must be a power of two. We check this via Assert rather than
* a full runtime check because the values will be coming from constant
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.77 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.78 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* However, it also makes the system unbelievably slow --- the regression
* tests take about 100 times longer than normal.
*
- * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY.
- * This slows things by at least a factor of 10000, so I wouldn't suggest
- * trying to run the entire regression tests that way. It's useful to
- * try a few simple tests, to make sure that cache reload isn't subject
- * to internal cache-flush hazards, but after you've done a few thousand
+ * If you're a glutton for punishment, try CLOBBER_CACHE_RECURSIVELY. This
+ * slows things by at least a factor of 10000, so I wouldn't suggest
+ * trying to run the entire regression tests that way. It's useful to try
+ * a few simple tests, to make sure that cache reload isn't subject to
+ * internal cache-flush hazards, but after you've done a few thousand
* recursive reloads it's unlikely you'll learn more.
*/
#if defined(CLOBBER_CACHE_ALWAYS)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.137 2006/09/28 20:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.138 2006/10/04 00:30:00 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
/*
* Get the nominal left-hand input type of the operator; we will ignore
- * opclasses that don't have that as the expected input datatype. This
- * is a kluge to avoid being confused by binary-compatible opclasses
- * (such as text_ops and varchar_ops, which share the same operators).
+ * opclasses that don't have that as the expected input datatype. This is
+ * a kluge to avoid being confused by binary-compatible opclasses (such as
+ * text_ops and varchar_ops, which share the same operators).
*/
op_input_types(opno, &lefttype, &righttype);
Assert(OidIsValid(lefttype));
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(opno),
0, 0, 0);
+
/*
- * If we can't find any opclass containing the op, perhaps it is a
- * <> operator. See if it has a negator that is in an opclass.
+ * If we can't find any opclass containing the op, perhaps it is a <>
+ * operator. See if it has a negator that is in an opclass.
*/
op_negated = false;
if (catlist->n_members == 0)
{
- Oid op_negator = get_negator(opno);
+ Oid op_negator = get_negator(opno);
if (OidIsValid(op_negator))
{
Form_pg_type typeStruct;
/*
- * In bootstrap mode, pass it off to bootstrap.c. This hack allows
- * us to use array_in and array_out during bootstrap.
+ * In bootstrap mode, pass it off to bootstrap.c. This hack allows us to
+ * use array_in and array_out during bootstrap.
*/
if (IsBootstrapProcessingMode())
{
- Oid typinput;
- Oid typoutput;
+ Oid typinput;
+ Oid typoutput;
boot_get_type_io_data(typid,
typlen,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.248 2006/09/05 21:08:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.249 2006/10/04 00:30:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void RelationFlushRelation(Relation relation);
static bool load_relcache_init_file(void);
static void write_relcache_init_file(void);
-static void write_item(const void *data, Size len, FILE *fp);
+static void write_item(const void *data, Size len, FILE *fp);
static void formrdesc(const char *relationName, Oid relationReltype,
bool hasoids, int natts, FormData_pg_attribute *att);
/*
* Copy the relation tuple form
*
- * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE.
- * The variable-length fields (relacl, reloptions) are NOT stored in the
+ * We only allocate space for the fixed fields, ie, CLASS_TUPLE_SIZE. The
+ * variable-length fields (relacl, reloptions) are NOT stored in the
* relcache --- there'd be little point in it, since we don't copy the
* tuple's nulls bitmap and hence wouldn't know if the values are valid.
- * Bottom line is that relacl *cannot* be retrieved from the relcache.
- * Get it from the syscache if you need it. The same goes for the
- * original form of reloptions (however, we do store the parsed form
- * of reloptions in rd_options).
+ * Bottom line is that relacl *cannot* be retrieved from the relcache. Get
+ * it from the syscache if you need it. The same goes for the original
+ * form of reloptions (however, we do store the parsed form of reloptions
+ * in rd_options).
*/
relationForm = (Form_pg_class) palloc(CLASS_TUPLE_SIZE);
}
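Since relacl is deliberately not kept in the relcache entry, callers that need it go to the syscache instead; a hedged sketch of that lookup (relid supplied by the caller, error handling abbreviated) is:

	HeapTuple	tuple;
	Datum		aclDatum;
	bool		isNull;

	tuple = SearchSysCache(RELOID, ObjectIdGetDatum(relid), 0, 0, 0);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "cache lookup failed for relation %u", relid);
	/* relacl is nullable and variable-length, so use SysCacheGetAttr */
	aclDatum = SysCacheGetAttr(RELOID, tuple, Anum_pg_class_relacl, &isNull);
	if (!isNull)
	{
		/* ... work with DatumGetAclP(aclDatum) ... */
	}
	ReleaseSysCache(tuple);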
/*
- * Fetch reloptions from tuple; have to use a hardwired descriptor
- * because we might not have any other for pg_class yet (consider
- * executing this code for pg_class itself)
+ * Fetch reloptions from tuple; have to use a hardwired descriptor because
+ * we might not have any other for pg_class yet (consider executing this
+ * code for pg_class itself)
*/
datum = fastgetattr(tuple,
Anum_pg_class_reloptions,
*
* Note: since we scan the rules using RewriteRelRulenameIndexId, we will
* be reading the rules in name order, except possibly during
- * emergency-recovery operations (ie, IgnoreSystemIndexes). This in
- * turn ensures that rules will be fired in name order.
+ * emergency-recovery operations (ie, IgnoreSystemIndexes). This in turn
+ * ensures that rules will be fired in name order.
*/
rewrite_desc = heap_open(RewriteRelationId, AccessShareLock);
rewrite_tupdesc = RelationGetDescr(rewrite_desc);
rule->isInstead = rewrite_form->is_instead;
/*
- * Must use heap_getattr to fetch ev_action and ev_qual. Also,
- * the rule strings are often large enough to be toasted. To avoid
- * leaking memory in the caller's context, do the detoasting here
- * so we can free the detoasted version.
+ * Must use heap_getattr to fetch ev_action and ev_qual. Also, the
+ * rule strings are often large enough to be toasted. To avoid
+ * leaking memory in the caller's context, do the detoasting here so
+ * we can free the detoasted version.
*/
rule_datum = heap_getattr(rewrite_tuple,
Anum_pg_rewrite_ev_action,
/*
* We want the rule's table references to be checked as though by the
- * table owner, not the user referencing the rule. Therefore, scan
+ * table owner, not the user referencing the rule. Therefore, scan
* through the rule's actions and set the checkAsUser field on all
- * rtable entries. We have to look at the qual as well, in case it
+ * rtable entries. We have to look at the qual as well, in case it
* contains sublinks.
*
- * The reason for doing this when the rule is loaded, rather than
- * when it is stored, is that otherwise ALTER TABLE OWNER would have
- * to grovel through stored rules to update checkAsUser fields.
- * Scanning the rule tree during load is relatively cheap (compared
- * to constructing it in the first place), so we do it here.
+ * The reason for doing this when the rule is loaded, rather than when
+ * it is stored, is that otherwise ALTER TABLE OWNER would have to
+ * grovel through stored rules to update checkAsUser fields. Scanning
+ * the rule tree during load is relatively cheap (compared to
+ * constructing it in the first place), so we do it here.
*/
setRuleCheckAsUser((Node *) rule->actions, relation->rd_rel->relowner);
setRuleCheckAsUser(rule->qual, relation->rd_rel->relowner);
* Even non-system indexes should not be blown away if they are open and
* have valid index support information. This avoids problems with active
* use of the index support information. As with nailed indexes, we
- * re-read the pg_class row to handle possible physical relocation of
- * the index.
+ * re-read the pg_class row to handle possible physical relocation of the
+ * index.
*/
if (relation->rd_rel->relkind == RELKIND_INDEX &&
relation->rd_refcnt > 0 &&
relation->rd_indexcxt != NULL)
{
- relation->rd_isvalid = false; /* needs to be revalidated */
+ relation->rd_isvalid = false; /* needs to be revalidated */
RelationReloadClassinfo(relation);
return;
}
/*
* check that hardwired list of shared rels matches what's in the
- * bootstrap .bki file. If you get a failure here during initdb,
- * you probably need to fix IsSharedRelation() to match whatever
- * you've done to the set of shared relations.
+ * bootstrap .bki file. If you get a failure here during initdb, you
+ * probably need to fix IsSharedRelation() to match whatever you've done
+ * to the set of shared relations.
*/
if (shared_relation != IsSharedRelation(relid))
elog(ERROR, "shared_relation flag for \"%s\" does not match IsSharedRelation(%u)",
* the system catalogs. We first try to read pre-computed relcache
* entries from the pg_internal.init file. If that's missing or
* broken, make phony entries for the minimum set of nailed-in-cache
- * relations. Then (unless bootstrapping) make sure we have entries
+ * relations. Then (unless bootstrapping) make sure we have entries
* for the critical system indexes. Once we've done all this, we
* have enough infrastructure to open any system catalog or use any
* catcache. The last step is to rewrite pg_internal.init if needed.
HASH_SEQ_STATUS status;
RelIdCacheEnt *idhentry;
MemoryContext oldcxt;
- bool needNewCacheFile = false;
+ bool needNewCacheFile = false;
/*
* switch to cache memory context
* RewriteRelRulenameIndexId and TriggerRelidNameIndexId are not critical
* in the same way as the others, because the critical catalogs don't
* (currently) have any rules or triggers, and so these indexes can be
- * rebuilt without inducing recursion. However they are used during
+ * rebuilt without inducing recursion. However they are used during
* relcache load when a rel does have rules or triggers, so we choose to
* nail them for performance reasons.
*/
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
result = CreateTemplateTupleDesc(natts, hasoids);
- result->tdtypeid = RECORDOID; /* not right, but we don't care */
+ result->tdtypeid = RECORDOID; /* not right, but we don't care */
result->tdtypmod = -1;
for (i = 0; i < natts; i++)
if ((nread = fread(rel->rd_options, 1, len, fp)) != len)
goto read_failed;
if (len != VARATT_SIZE(rel->rd_options))
- goto read_failed; /* sanity check */
+ goto read_failed; /* sanity check */
}
else
{
/* finally, write the vector of support procedures */
write_item(rel->rd_support,
- relform->relnatts * (am->amsupport * sizeof(RegProcedure)),
+ relform->relnatts * (am->amsupport * sizeof(RegProcedure)),
fp);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.106 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.107 2006/10/04 00:30:00 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.21 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.22 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(rel->rd_rel->reltype == typentry->type_id);
/*
- * Link to the tupdesc and increment its refcount (we assert it's
- * a refcounted descriptor). We don't use IncrTupleDescRefCount()
- * for this, because the reference mustn't be entered in the current
+ * Link to the tupdesc and increment its refcount (we assert it's a
+ * refcounted descriptor). We don't use IncrTupleDescRefCount() for
+ * this, because the reference mustn't be entered in the current
* resource owner; it can outlive the current query.
*/
typentry->tupDesc = RelationGetDescr(rel);
TupleDesc
lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
{
- TupleDesc tmp;
+ TupleDesc tmp;
tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
return CreateTupleDescCopyConstr(tmp);
return; /* tupdesc hasn't been requested */
/*
- * Release our refcount and free the tupdesc if none remain.
- * (Can't use DecrTupleDescRefCount because this reference is not
- * logged in current resource owner.)
+ * Release our refcount and free the tupdesc if none remain. (Can't use
+ * DecrTupleDescRefCount because this reference is not logged in current
+ * resource owner.)
*/
Assert(typentry->tupDesc->tdrefcount > 0);
if (--typentry->tupDesc->tdrefcount == 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.90 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.91 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* signatures for PostgreSQL-specific library init/fini functions */
-typedef void (*PG_init_t)(void);
-typedef void (*PG_fini_t)(void);
+typedef void (*PG_init_t) (void);
+typedef void (*PG_fini_t) (void);
/* hashtable entry for rendezvous variables */
typedef struct
-{
- char varName[NAMEDATALEN]; /* hash key (must be first) */
- void *varValue;
+{
+ char varName[NAMEDATALEN]; /* hash key (must be first) */
+ void *varValue;
} rendezvousHashEntry;
/*
/*
* Load the specified dynamic-link library file, unless it already is
- * loaded. Return the pg_dl* handle for the file.
+ * loaded. Return the pg_dl* handle for the file.
*
* Note: libname is expected to be an exact name for the library file.
*/
const Pg_magic_struct *magic_data_ptr = (*magic_func) ();
if (magic_data_ptr->len != magic_data.len ||
- memcmp(magic_data_ptr, &magic_data, magic_data.len) != 0)
+ memcmp(magic_data_ptr, &magic_data, magic_data.len) != 0)
{
/* copy data block before unlinking library */
Pg_magic_struct module_magic_data = *magic_data_ptr;
free((char *) file_scanner);
/*
- * Report suitable error. It's probably not worth writing
- * a separate error message for each field; only the most
- * common case of wrong major version gets its own message.
+ * Report suitable error. It's probably not worth writing a
+ * separate error message for each field; only the most common
+ * case of wrong major version gets its own message.
*/
if (module_magic_data.version != magic_data.version)
ereport(ERROR,
- (errmsg("incompatible library \"%s\": version mismatch",
- libname),
- errdetail("Server is version %d.%d, library is version %d.%d.",
- magic_data.version/100,
- magic_data.version % 100,
- module_magic_data.version/100,
- module_magic_data.version % 100)));
+ (errmsg("incompatible library \"%s\": version mismatch",
+ libname),
+ errdetail("Server is version %d.%d, library is version %d.%d.",
+ magic_data.version / 100,
+ magic_data.version % 100,
+ module_magic_data.version / 100,
+ module_magic_data.version % 100)));
ereport(ERROR,
- (errmsg("incompatible library \"%s\": magic block mismatch",
- libname)));
+ (errmsg("incompatible library \"%s\": magic block mismatch",
+ libname)));
}
}
else
free((char *) file_scanner);
/* complain */
ereport(ERROR,
- (errmsg("incompatible library \"%s\": missing magic block",
- libname),
- errhint("Extension libraries are now required to use the PG_MODULE_MAGIC macro.")));
+ (errmsg("incompatible library \"%s\": missing magic block",
+ libname),
+ errhint("Extension libraries are now required to use the PG_MODULE_MAGIC macro.")));
}
/*
*/
PG_init = (PG_init_t) pg_dlsym(file_scanner->handle, "_PG_init");
if (PG_init)
- (*PG_init)();
+ (*PG_init) ();
/* OK to link it into list */
if (file_list == NULL)
*/
PG_fini = (PG_fini_t) pg_dlsym(file_scanner->handle, "_PG_fini");
if (PG_fini)
- (*PG_fini)();
+ (*PG_fini) ();
clear_external_function_hash(file_scanner->handle);
pg_dlclose(file_scanner->handle);
}
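On the library side, the counterpart of the magic-block check and the _PG_init/_PG_fini hooks handled above is only a few lines of boilerplate; a minimal illustrative extension skeleton (not part of this patch) looks like:

	#include "postgres.h"
	#include "fmgr.h"

	PG_MODULE_MAGIC;			/* emits the Pg_magic_func block verified by the loader */

	void		_PG_init(void);
	void		_PG_fini(void);

	void
	_PG_init(void)
	{
		/* one-time setup, run when the library is first loaded */
	}

	void
	_PG_fini(void)
	{
		/* cleanup, run if the library is ever unloaded */
	}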
/*
- * If we can't find the file, just return the string as-is.
- * The ensuing load attempt will fail and report a suitable message.
+ * If we can't find the file, just return the string as-is. The ensuing
+ * load attempt will fail and report a suitable message.
*/
return pstrdup(name);
}
/*
- * Find (or create) a rendezvous variable that one dynamically
+ * Find (or create) a rendezvous variable that one dynamically
* loaded library can use to meet up with another.
*
* On the first call of this function for a particular varName,
* to find each other and share information: they just need to agree
* on the variable name and the data it will point to.
*/
-void **
+void **
find_rendezvous_variable(const char *varName)
{
- static HTAB *rendezvousHash = NULL;
+ static HTAB *rendezvousHash = NULL;
rendezvousHashEntry *hentry;
- bool found;
+ bool found;
/* Create a hashtable if we haven't already done so in this process */
if (rendezvousHash == NULL)
{
- HASHCTL ctl;
+ HASHCTL ctl;
MemSet(&ctl, 0, sizeof(ctl));
- ctl.keysize = NAMEDATALEN;
- ctl.entrysize = sizeof(rendezvousHashEntry);
+ ctl.keysize = NAMEDATALEN;
+ ctl.entrysize = sizeof(rendezvousHashEntry);
rendezvousHash = hash_create("Rendezvous variable hash",
16,
&ctl,
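A hedged usage sketch of the rendezvous mechanism (the variable name and callback type below are hypothetical): one library stores a pointer under an agreed-upon name, and another later fetches the same slot.

	/* library A: publish a hook */
	void	  **slot = find_rendezvous_variable("my_shared_hook");

	*slot = (void *) my_hook_function;

	/* library B: consume it, tolerating the case where A never loaded */
	void	  **slot2 = find_rendezvous_variable("my_shared_hook");

	if (*slot2 != NULL)
		((my_hook_type) *slot2) ();		/* cast back to the agreed-upon signature */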
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.101 2006/05/30 21:21:30 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/fmgr.c,v 1.102 2006/10/04 00:30:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* This is little more than window dressing for FunctionCall1, but its use is
* recommended anyway so that code invoking output functions can be identified
- * easily. Note however that it does guarantee a non-toasted result.
+ * easily. Note however that it does guarantee a non-toasted result.
*/
bytea *
SendFunctionCall(FmgrInfo *flinfo, Datum val)
*
* dynahash.c supports both local-to-a-backend hash tables and hash tables in
* shared memory. For shared hash tables, it is the caller's responsibility
- * to provide appropriate access interlocking. The simplest convention is
- * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
+ * to provide appropriate access interlocking. The simplest convention is
+ * that a single LWLock protects the whole hash table. Searches (HASH_FIND or
* hash_seq_search) need only shared lock, but any update requires exclusive
* lock. For heavily-used shared tables, the single-lock approach creates a
* concurrency bottleneck, so we also support "partitioned" locking wherein
* there are multiple LWLocks guarding distinct subsets of the table. To use
* a hash table in partitioned mode, the HASH_PARTITION flag must be given
- * to hash_create. This prevents any attempt to split buckets on-the-fly.
+ * to hash_create. This prevents any attempt to split buckets on-the-fly.
* Therefore, each hash bucket chain operates independently, and no fields
* of the hash header change after init except nentries and freeList.
* A partitioned table uses a spinlock to guard changes of those two fields.
* This lets any subset of the hash buckets be treated as a separately
- * lockable partition. We expect callers to use the low-order bits of a
+ * lockable partition. We expect callers to use the low-order bits of a
* lookup key's hash value as a partition number --- this will work because
* of the way calc_bucket() maps hash values to bucket numbers.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.72 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.73 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
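A minimal sketch of the partitioned setup the comment describes (sizes and struct names illustrative; shared tables in the backend proper normally go through ShmemInitHash): pass HASH_PARTITION with a power-of-two num_partitions and derive the guarding lock from the low-order bits of the key's hash value.

	HASHCTL		info;
	HTAB	   *htab;

	MemSet(&info, 0, sizeof(info));
	info.keysize = sizeof(ExampleKey);		/* ExampleKey/ExampleEntry are hypothetical */
	info.entrysize = sizeof(ExampleEntry);
	info.hash = tag_hash;
	info.num_partitions = 16;				/* must be a power of 2 */

	htab = hash_create("example partitioned table", 1024, &info,
					   HASH_ELEM | HASH_FUNCTION | HASH_PARTITION);

	/* choose the partition, and hence the LWLock, from the hash value */
	/* partition = hashvalue & (info.num_partitions - 1); */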
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
+ long num_partitions; /* # partitions (must be power of 2), or 0 */
long ffactor; /* target fill factor */
long max_dsize; /* 'dsize' limit if directory is fixed size */
long ssize; /* segment size --- must be power of 2 */
int nelem_alloc; /* number of entries to allocate at once */
#ifdef HASH_STATISTICS
+
/*
- * Count statistics here. NB: stats code doesn't bother with mutex,
- * so counts could be corrupted a bit in a partitioned table.
+ * Count statistics here. NB: stats code doesn't bother with mutex, so
+ * counts could be corrupted a bit in a partitioned table.
*/
long accesses;
long collisions;
hashp->hash = string_hash; /* default hash function */
/*
- * If you don't specify a match function, it defaults to string_compare
- * if you used string_hash (either explicitly or by default) and to memcmp
+ * If you don't specify a match function, it defaults to string_compare if
+ * you used string_hash (either explicitly or by default) and to memcmp
* otherwise. (Prior to PostgreSQL 7.4, memcmp was always used.)
*/
if (flags & HASH_COMPARE)
{
/*
* ctl structure and directory are preallocated for shared memory
- * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set
- * as well.
+ * tables. Note that HASH_DIRSIZE and HASH_ALLOC had better be set as
+ * well.
*/
hashp->hctl = info->hctl;
hashp->dir = (HASHSEGMENT *) (((char *) info->hctl) + sizeof(HASHHDR));
* This reduces problems with run-time out-of-shared-memory conditions.
*
* For a non-shared hash table, preallocate the requested number of
- * elements if it's less than our chosen nelem_alloc. This avoids
- * wasting space if the caller correctly estimates a small table size.
+ * elements if it's less than our chosen nelem_alloc. This avoids wasting
+ * space if the caller correctly estimates a small table size.
*/
if ((flags & HASH_SHARED_MEM) ||
nelem < hctl->nelem_alloc)
elementSize = MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(entrysize);
/*
- * The idea here is to choose nelem_alloc at least 32, but round up
- * so that the allocation request will be a power of 2 or just less.
- * This makes little difference for hash tables in shared memory,
- * but for hash tables managed by palloc, the allocation request
- * will be rounded up to a power of 2 anyway. If we fail to take
- * this into account, we'll waste as much as half the allocated space.
+ * The idea here is to choose nelem_alloc at least 32, but round up so
+ * that the allocation request will be a power of 2 or just less. This
+ * makes little difference for hash tables in shared memory, but for hash
+ * tables managed by palloc, the allocation request will be rounded up to
+ * a power of 2 anyway. If we fail to take this into account, we'll waste
+ * as much as half the allocated space.
*/
allocSize = 32 * 4; /* assume elementSize at least 8 */
- do {
+ do
+ {
allocSize <<= 1;
nelem_alloc = allocSize / elementSize;
} while (nelem_alloc < 32);
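To make the rounding concrete (numbers assumed, not taken from a build): with elementSize = 48 bytes, allocSize starts at 128 and doubles through 256, 512 and 1024 to 2048, at which point nelem_alloc = 2048 / 48 = 42, the first value >= 32; the eventual request of 42 * 48 = 2016 bytes then sits just under the 2048-byte power of two, so palloc's own rounding wastes almost nothing.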
/* Check if it is time to split a bucket */
/* Can't split if running in partitioned mode */
if (!IS_PARTITIONED(hctl) &&
- hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
+ hctl->nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor)
{
/*
* NOTE: failure to expand table is not a fatal error, it just
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.28 2006/09/27 18:40:09 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/hashfn.c,v 1.29 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* because when it is copied into the hash table it will be truncated at
* that length.
*/
- Size s_len = strlen((const char *) key);
+ Size s_len = strlen((const char *) key);
- s_len = Min(s_len, keysize-1);
+ s_len = Min(s_len, keysize - 1);
return DatumGetUInt32(hash_any((const unsigned char *) key,
(int) s_len));
}
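A brief worked example of that truncation (keysize assumed): with keysize = 8, both "abcdefg" and "abcdefgXYZ" hash over the same 7 bytes, because s_len is clamped to keysize - 1, matching the truncation the key undergoes when it is copied into the table.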
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.98 2006/05/02 11:28:55 teodor Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.99 2006/10/04 00:30:02 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
int VacuumCostBalance = 0; /* working state for vacuum */
bool VacuumCostActive = false;
-int GinFuzzySearchLimit = 0;
+int GinFuzzySearchLimit = 0;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.158 2006/08/16 04:32:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.159 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ----------------------------------------------------------------
*/
-bool IgnoreSystemIndexes = false;
+bool IgnoreSystemIndexes = false;
/* ----------------------------------------------------------------
* system index reindexing support
*-------------------------------------------------------------------------
*/
-/*
+/*
* GUC variables: lists of library names to be preloaded at postmaster
* start and at backend start
*/
/* If restricting, insert $libdir/plugins if not mentioned already */
if (restricted && first_dir_separator(filename) == NULL)
{
- char *expanded;
+ char *expanded;
expanded = palloc(strlen("$libdir/plugins/") + strlen(filename) + 1);
strcpy(expanded, "$libdir/plugins/");
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.170 2006/09/18 22:40:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.171 2006/10/04 00:30:02 momjian Exp $
*
*
*-------------------------------------------------------------------------
/*
* Check permissions to connect to the database.
*
- * These checks are not enforced when in standalone mode, so that
- * there is a way to recover from disabling all access to all databases,
- * for example "UPDATE pg_database SET datallowconn = false;".
+ * These checks are not enforced when in standalone mode, so that there is
+ * a way to recover from disabling all access to all databases, for
+ * example "UPDATE pg_database SET datallowconn = false;".
*
* We do not enforce them for the autovacuum process either.
*/
name)));
/*
- * Check privilege to connect to the database. (The am_superuser
- * test is redundant, but since we have the flag, might as well
- * check it and save a few cycles.)
+ * Check privilege to connect to the database. (The am_superuser test
+ * is redundant, but since we have the flag, might as well check it
+ * and save a few cycles.)
*/
if (!am_superuser &&
pg_database_aclcheck(MyDatabaseId, GetUserId(),
char *fullpath;
/*
- * Set up the global variables holding database id and path. But note
- * we won't actually try to touch the database just yet.
+ * Set up the global variables holding database id and path. But note we
+ * won't actually try to touch the database just yet.
*
* We take a shortcut in the bootstrap case, otherwise we have to look up
* the db name in pg_database.
SetDatabasePath(fullpath);
/*
- * Finish filling in the PGPROC struct, and add it to the ProcArray.
- * (We need to know MyDatabaseId before we can do this, since it's entered
+ * Finish filling in the PGPROC struct, and add it to the ProcArray. (We
+ * need to know MyDatabaseId before we can do this, since it's entered
* into the PGPROC struct.)
*
* Once I have done this, I am visible to other backends!
/*
* Initialize the relation cache and the system catalog caches. Note that
* no catalog access happens here; we only set up the hashtable structure.
- * We must do this before starting a transaction because transaction
- * abort would try to touch these hashtables.
+ * We must do this before starting a transaction because transaction abort
+ * would try to touch these hashtables.
*/
RelationCacheInitialize();
InitCatalogCache();
/*
* Now that we have a transaction, we can take locks. Take a writer's
- * lock on the database we are trying to connect to. If there is
- * a concurrently running DROP DATABASE on that database, this will
- * block us until it finishes (and has updated the flat file copy
- * of pg_database).
+ * lock on the database we are trying to connect to. If there is a
+ * concurrently running DROP DATABASE on that database, this will block us
+ * until it finishes (and has updated the flat file copy of pg_database).
*
- * Note that the lock is not held long, only until the end of this
- * startup transaction. This is OK since we are already advertising
- * our use of the database in the PGPROC array; anyone trying a DROP
- * DATABASE after this point will see us there.
+ * Note that the lock is not held long, only until the end of this startup
+ * transaction. This is OK since we are already advertising our use of
+ * the database in the PGPROC array; anyone trying a DROP DATABASE after
+ * this point will see us there.
*
* Note: use of RowExclusiveLock here is reasonable because we envision
- * our session as being a concurrent writer of the database. If we had
- * a way of declaring a session as being guaranteed-read-only, we could
- * use AccessShareLock for such sessions and thereby not conflict against
+ * our session as being a concurrent writer of the database. If we had a
+ * way of declaring a session as being guaranteed-read-only, we could use
+ * AccessShareLock for such sessions and thereby not conflict against
* CREATE DATABASE.
*/
if (!bootstrap)
*/
if (!bootstrap)
{
- Oid dbid2;
- Oid tsid2;
+ Oid dbid2;
+ Oid tsid2;
if (!FindMyDatabase(dbname, &dbid2, &tsid2) ||
dbid2 != MyDatabaseId || tsid2 != MyDatabaseTableSpace)
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("It seems to have just been dropped or renamed.")));
+ errdetail("It seems to have just been dropped or renamed.")));
}
/*
- * Now we should be able to access the database directory safely.
- * Verify it's there and looks reasonable.
+ * Now we should be able to access the database directory safely. Verify
+ * it's there and looks reasonable.
*/
if (!bootstrap)
{
(errcode(ERRCODE_UNDEFINED_DATABASE),
errmsg("database \"%s\" does not exist",
dbname),
- errdetail("The database subdirectory \"%s\" is missing.",
- fullpath)));
+ errdetail("The database subdirectory \"%s\" is missing.",
+ fullpath)));
else
ereport(FATAL,
(errcode_for_file_access(),
initialize_acl();
/*
- * Read the real pg_database row for our database, check permissions
- * and set up database-specific GUC settings. We can't do this until all
- * the database-access infrastructure is up. (Also, it wants to know if
- * the user is a superuser, so the above stuff has to happen first.)
+ * Read the real pg_database row for our database, check permissions and
+ * set up database-specific GUC settings. We can't do this until all the
+ * database-access infrastructure is up. (Also, it wants to know if the
+ * user is a superuser, so the above stuff has to happen first.)
*/
if (!bootstrap)
CheckMyDatabase(dbname, am_superuser);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.60 2006/05/21 20:05:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conv.c,v 1.61 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
else
{
- int l = pg_mic_mblen(mic);
+ int l = pg_mic_mblen(mic);
if (len < l)
report_invalid_encoding(PG_MULE_INTERNAL, (const char *) mic,
}
else
{
- int l = pg_mic_mblen(mic);
+ int l = pg_mic_mblen(mic);
if (len < l)
report_invalid_encoding(PG_MULE_INTERNAL, (const char *) mic,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c,v 1.15 2006/05/30 22:12:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/euc_tw_and_big5/euc_tw_and_big5.c,v 1.16 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(const char *) euc, len);
if (c1 == SS2)
{
- c1 = euc[1]; /* plane No. */
+ c1 = euc[1]; /* plane No. */
if (c1 == 0xa1)
*p++ = LC_CNS11643_1;
else if (c1 == 0xa2)
*p++ = euc[3];
}
else
- { /* CNS11643-1 */
+ { /* CNS11643-1 */
*p++ = LC_CNS11643_1;
*p++ = c1;
*p++ = euc[1];
{
*p++ = 0x9d; /* LCPRV2 */
}
- *p++ = lc; /* Plane No. */
+ *p++ = lc; /* Plane No. */
*p++ = (cnsBuf >> 8) & 0x00ff;
*p++ = cnsBuf & 0x00ff;
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.16 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_cyrillic/utf8_and_cyrillic.c,v 1.17 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
PG_RETURN_VOID();
}
-
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.15 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_gb18030/utf8_and_gb18030.c,v 1.16 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(len >= 0);
UtfToLocal(src, dest, ULmapGB18030,
- sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), PG_GB18030, len);
+ sizeof(ULmapGB18030) / sizeof(pg_utf_to_local), PG_GB18030, len);
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.21 2006/07/11 18:26:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859/utf8_and_iso8859.c,v 1.22 2006/10/04 00:30:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(1) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
+ errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
PG_RETURN_VOID();
}
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(0) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
- errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
+ errmsg("unexpected encoding id %d for ISO-8859 charsets", encoding)));
PG_RETURN_VOID();
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c,v 1.17 2006/05/30 22:12:15 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_iso8859_1/utf8_and_iso8859_1.c,v 1.18 2006/10/04 00:30:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
else
{
- int l = pg_utf_mblen(src);
+ int l = pg_utf_mblen(src);
if (l > len || !pg_utf8_islegal(src, l))
report_invalid_encoding(PG_UTF8, (const char *) src, len);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.5 2006/07/11 18:26:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/conversion_procs/utf8_and_win/utf8_and_win.c,v 1.6 2006/10/04 00:30:03 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static pg_conv_map maps[] = {
{PG_WIN866, LUmapWIN866, ULmapWIN866,
sizeof(LUmapWIN866) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN866) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN866) / sizeof(pg_utf_to_local)},
{PG_WIN874, LUmapWIN874, ULmapWIN874,
sizeof(LUmapWIN874) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN874) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN874) / sizeof(pg_utf_to_local)},
{PG_WIN1250, LUmapWIN1250, ULmapWIN1250,
sizeof(LUmapWIN1250) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1250) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1250) / sizeof(pg_utf_to_local)},
{PG_WIN1251, LUmapWIN1251, ULmapWIN1251,
sizeof(LUmapWIN1251) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1251) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1251) / sizeof(pg_utf_to_local)},
{PG_WIN1252, LUmapWIN1252, ULmapWIN1252,
sizeof(LUmapWIN1252) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1252) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1252) / sizeof(pg_utf_to_local)},
{PG_WIN1253, LUmapWIN1253, ULmapWIN1253,
sizeof(LUmapWIN1253) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1253) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1253) / sizeof(pg_utf_to_local)},
{PG_WIN1254, LUmapWIN1254, ULmapWIN1254,
sizeof(LUmapWIN1254) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1254) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1254) / sizeof(pg_utf_to_local)},
{PG_WIN1255, LUmapWIN1255, ULmapWIN1255,
sizeof(LUmapWIN1255) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1255) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1255) / sizeof(pg_utf_to_local)},
{PG_WIN1256, LUmapWIN1256, ULmapWIN1256,
sizeof(LUmapWIN1256) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1256) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1256) / sizeof(pg_utf_to_local)},
{PG_WIN1257, LUmapWIN1257, ULmapWIN1257,
sizeof(LUmapWIN1257) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1257) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1257) / sizeof(pg_utf_to_local)},
{PG_WIN1258, LUmapWIN1258, ULmapWIN1258,
sizeof(LUmapWIN1258) / sizeof(pg_local_to_utf),
- sizeof(ULmapWIN1258) / sizeof(pg_utf_to_local)},
+ sizeof(ULmapWIN1258) / sizeof(pg_utf_to_local)},
};
Datum
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(1) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
unsigned char *src = (unsigned char *) PG_GETARG_CSTRING(2);
unsigned char *dest = (unsigned char *) PG_GETARG_CSTRING(3);
int len = PG_GETARG_INT32(4);
- int i;
+ int i;
Assert(PG_GETARG_INT32(0) == PG_UTF8);
Assert(len >= 0);
- for (i=0;i<sizeof(maps)/sizeof(pg_conv_map);i++)
+ for (i = 0; i < sizeof(maps) / sizeof(pg_conv_map); i++)
{
if (encoding == maps[i].encoding)
{
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.58 2006/07/14 14:52:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.59 2006/10/04 00:30:02 momjian Exp $
*/
#include "postgres.h"
else
{
/*
- * This is the first time through, so create the context. Make
- * it a child of TopMemoryContext so that these values survive
- * across transactions.
+ * This is the first time through, so create the context. Make it a
+ * child of TopMemoryContext so that these values survive across
+ * transactions.
*/
MbProcContext = AllocSetContextCreate(TopMemoryContext,
"MbProcContext",
{
/*
* No conversion is possible, but we must still validate the data,
- * because the client-side code might have done string escaping
- * using the selected client_encoding. If the client encoding is
- * ASCII-safe then we just do a straight validation under that
- * encoding. For an ASCII-unsafe encoding we have a problem:
- * we dare not pass such data to the parser but we have no way
- * to convert it. We compromise by rejecting the data if it
- * contains any non-ASCII characters.
+ * because the client-side code might have done string escaping using
+ * the selected client_encoding. If the client encoding is ASCII-safe
+ * then we just do a straight validation under that encoding. For an
+ * ASCII-unsafe encoding we have a problem: we dare not pass such data
+ * to the parser but we have no way to convert it. We compromise by
+ * rejecting the data if it contains any non-ASCII characters.
*/
if (PG_VALID_BE_ENCODING(ClientEncoding->encoding))
(void) pg_verify_mbstr(ClientEncoding->encoding, s, len, false);
else
{
- int i;
+ int i;
for (i = 0; i < len; i++)
{
if (s[i] == '\0' || IS_HIGHBIT_SET(s[i]))
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte value for encoding \"%s\": 0x%02x",
- pg_enc2name_tbl[PG_SQL_ASCII].name,
- (unsigned char) s[i])));
+ errmsg("invalid byte value for encoding \"%s\": 0x%02x",
+ pg_enc2name_tbl[PG_SQL_ASCII].name,
+ (unsigned char) s[i])));
}
}
return (char *) s;
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.57 2006/08/22 12:11:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.58 2006/10/04 00:30:02 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
* SQL/ASCII
*/
static int
-pg_ascii2wchar_with_len
+ pg_ascii2wchar_with_len
(const unsigned char *from, pg_wchar *to, int len)
{
int cnt = 0;
return 0;
if (*s < 0x20 || *s == 0x7f)
return -1;
-
+
return 1;
}
while (len > 0 && *from)
{
- if (*from == SS2 && len >= 2) /* JIS X 0201 (so called "1 byte KANA") */
+ if (*from == SS2 && len >= 2) /* JIS X 0201 (so called "1 byte
+ * KANA") */
{
from++;
*to = (SS2 << 8) | *from++;
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* JIS X 0208 KANJI */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* JIS X 0208 KANJI */
{
*to = *from++ << 8;
*to |= *from++;
len -= 2;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
*to = *from++;
len--;
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 1 */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 1 */
{
*to = *from++ << 8;
*to |= *from++;
if (*from == SS2 && len >= 4) /* code set 2 */
{
from++;
- *to = (SS2 << 24) | (*from++ << 16) ;
+ *to = (SS2 << 24) | (*from++ << 16);
*to |= *from++ << 8;
*to |= *from++;
len -= 4;
*to |= *from++;
len -= 3;
}
- else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 2 */
+ else if (IS_HIGHBIT_SET(*from) && len >= 2) /* code set 2 */
{
*to = *from++ << 8;
*to |= *from++;
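A worked illustration of the byte packing above (input bytes assumed): an EUC_TW code-set-2 sequence 0x8E 0xA2 0xC4 0xE3 is folded into the single pg_wchar (SS2 << 24) | (0xA2 << 16) | (0xC4 << 8) | 0xE3 = 0x8EA2C4E3, while a two-byte code-set-1 character such as 0xC4 0xE3 packs to just 0xC4E3.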
/* auxiliary function for binary search in interval table */
static int
-mbbisearch(pg_wchar ucs, const struct mbinterval *table, int max)
+mbbisearch(pg_wchar ucs, const struct mbinterval * table, int max)
{
int min = 0;
int mid;
else if (IS_LCPRV2(*s))
len = 4;
else
- len = 1; /* assume ASCII */
+ len = 1; /* assume ASCII */
return len;
}
static int
pg_mule_dsplen(const unsigned char *s)
{
- int len;
+ int len;
if (IS_LC1(*s))
len = 1;
else if (IS_LCPRV2(*s))
len = 2;
else
- len = 1; /* assume ASCII */
+ len = 1; /* assume ASCII */
return len;
}
int len;
if (*s >= 0xa1 && *s <= 0xdf)
- len = 1; /* 1 byte kana? */
+ len = 1; /* 1 byte kana? */
else if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
int len;
if (*s >= 0xa1 && *s <= 0xdf)
- len = 1; /* 1 byte kana? */
+ len = 1; /* 1 byte kana? */
else if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* kanji? */
+ len = 2; /* kanji? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* 2byte? */
+ len = 2; /* 2byte? */
else
- len = 1; /* should be ASCII */
+ len = 1; /* should be ASCII */
return len;
}
int len;
if (IS_HIGHBIT_SET(*s))
- len = 2; /* 2byte? */
+ len = 2; /* 2byte? */
else
- len = pg_ascii_dsplen(s); /* should be ASCII */
+ len = pg_ascii_dsplen(s); /* should be ASCII */
return len;
}
int len;
if (!IS_HIGHBIT_SET(*s))
- len = 1; /* ASCII */
+ len = 1; /* ASCII */
else
{
if ((*(s + 1) >= 0x40 && *(s + 1) <= 0x7e) || (*(s + 1) >= 0x80 && *(s + 1) <= 0xfe))
if (IS_HIGHBIT_SET(*s))
len = 2;
else
- len = pg_ascii_dsplen(s); /* ASCII */
+ len = pg_ascii_dsplen(s); /* ASCII */
return len;
}
pg_eucjp_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
switch (c1)
{
- case SS2: /* JIS X 0201 */
+ case SS2: /* JIS X 0201 */
l = 2;
if (l > len)
return -1;
return -1;
break;
- case SS3: /* JIS X 0212 */
+ case SS3: /* JIS X 0212 */
l = 3;
if (l > len)
return -1;
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
pg_euckr_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
pg_euctw_verifier(const unsigned char *s, int len)
{
int l;
- unsigned char c1, c2;
+ unsigned char c1,
+ c2;
c1 = *s++;
switch (c1)
{
- case SS2: /* CNS 11643 Plane 1-7 */
+ case SS2: /* CNS 11643 Plane 1-7 */
l = 4;
if (l > len)
return -1;
return -1;
break;
- case SS3: /* unused */
+ case SS3: /* unused */
return -1;
default:
if (!IS_EUC_RANGE_VALID(c2))
return -1;
}
- else /* must be ASCII */
+ else
+ /* must be ASCII */
{
l = 1;
}
static int
pg_johab_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
unsigned char c;
l = mbl = pg_johab_mblen(s);
static int
pg_mule_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
unsigned char c;
l = mbl = pg_mule_mblen(s);
static int
pg_sjis_verifier(const unsigned char *s, int len)
{
- int l, mbl;
- unsigned char c1, c2;
+ int l,
+ mbl;
+ unsigned char c1,
+ c2;
l = mbl = pg_sjis_mblen(s);
static int
pg_big5_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_big5_mblen(s);
static int
pg_gbk_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_gbk_mblen(s);
static int
pg_uhc_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_uhc_mblen(s);
static int
pg_gb18030_verifier(const unsigned char *s, int len)
{
- int l, mbl;
+ int l,
+ mbl;
l = mbl = pg_gb18030_mblen(s);
static int
pg_utf8_verifier(const unsigned char *s, int len)
{
- int l = pg_utf_mblen(s);
+ int l = pg_utf_mblen(s);
if (len < l)
return -1;
*-------------------------------------------------------------------
*/
pg_wchar_tbl pg_wchar_table[] = {
- {pg_ascii2wchar_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* 0; PG_SQL_ASCII */
- {pg_eucjp2wchar_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* 1; PG_EUC_JP */
- {pg_euccn2wchar_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 3}, /* 2; PG_EUC_CN */
- {pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* 3; PG_EUC_KR */
- {pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, pg_euctw_verifier, 3}, /* 4; PG_EUC_TW */
- {pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 5; PG_JOHAB */
+ {pg_ascii2wchar_with_len, pg_ascii_mblen, pg_ascii_dsplen, pg_ascii_verifier, 1}, /* 0; PG_SQL_ASCII */
+ {pg_eucjp2wchar_with_len, pg_eucjp_mblen, pg_eucjp_dsplen, pg_eucjp_verifier, 3}, /* 1; PG_EUC_JP */
+ {pg_euccn2wchar_with_len, pg_euccn_mblen, pg_euccn_dsplen, pg_euccn_verifier, 3}, /* 2; PG_EUC_CN */
+ {pg_euckr2wchar_with_len, pg_euckr_mblen, pg_euckr_dsplen, pg_euckr_verifier, 3}, /* 3; PG_EUC_KR */
+ {pg_euctw2wchar_with_len, pg_euctw_mblen, pg_euctw_dsplen, pg_euctw_verifier, 3}, /* 4; PG_EUC_TW */
+ {pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, pg_johab_verifier, 3}, /* 5; PG_JOHAB */
{pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, pg_utf8_verifier, 4}, /* 6; PG_UTF8 */
- {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, pg_mule_verifier, 3}, /* 7; PG_MULE_INTERNAL */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 8; PG_LATIN1 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 9; PG_LATIN2 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 10; PG_LATIN3 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 11; PG_LATIN4 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 12; PG_LATIN5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 13; PG_LATIN6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 14; PG_LATIN7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 15; PG_LATIN8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 16; PG_LATIN9 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 17; PG_LATIN10 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 18; PG_WIN1256 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 19; PG_WIN1258 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 20; PG_WIN874 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 21; PG_KOI8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1251 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1252 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 23; PG_WIN866 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 24; ISO-8859-5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 25; ISO-8859-6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 26; ISO-8859-7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 27; ISO-8859-8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 28; PG_WIN1250 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 29; PG_WIN1253 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 30; PG_WIN1254 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 31; PG_WIN1255 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 32; PG_WIN1257 */
- {0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* 33; PG_SJIS */
- {0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* 34; PG_BIG5 */
+ {pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, pg_mule_verifier, 3}, /* 7; PG_MULE_INTERNAL */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 8; PG_LATIN1 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 9; PG_LATIN2 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 10; PG_LATIN3 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 11; PG_LATIN4 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 12; PG_LATIN5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 13; PG_LATIN6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 14; PG_LATIN7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 15; PG_LATIN8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 16; PG_LATIN9 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 17; PG_LATIN10 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 18; PG_WIN1256 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 19; PG_WIN1258 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 20; PG_WIN874 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 21; PG_KOI8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1251 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 22; PG_WIN1252 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 23; PG_WIN866 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 24; ISO-8859-5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 25; ISO-8859-6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 26; ISO-8859-7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 27; ISO-8859-8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 28; PG_WIN1250 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 29; PG_WIN1253 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 30; PG_WIN1254 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 31; PG_WIN1255 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, pg_latin1_verifier, 1}, /* 32; PG_WIN1257 */
+ {0, pg_sjis_mblen, pg_sjis_dsplen, pg_sjis_verifier, 2}, /* 33; PG_SJIS */
+ {0, pg_big5_mblen, pg_big5_dsplen, pg_big5_verifier, 2}, /* 34; PG_BIG5 */
{0, pg_gbk_mblen, pg_gbk_dsplen, pg_gbk_verifier, 2}, /* 35; PG_GBK */
{0, pg_uhc_mblen, pg_uhc_dsplen, pg_uhc_verifier, 2}, /* 36; PG_UHC */
- {0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 2} /* 37; PG_GB18030 */
+ {0, pg_gb18030_mblen, pg_gb18030_dsplen, pg_gb18030_verifier, 2} /* 37; PG_GB18030 */
};
/* returns the byte length of a word for mule internal code */
return ((encoding >= 0 &&
encoding < sizeof(pg_wchar_table) / sizeof(pg_wchar_tbl)) ?
- ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
- ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
+ ((*pg_wchar_table[encoding].mbverify) ((const unsigned char *) mbstr, len)) :
+ ((*pg_wchar_table[PG_SQL_ASCII].mbverify) ((const unsigned char *) mbstr, len)));
}
/*
errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
pg_enc2name_tbl[encoding].name,
buf),
- errhint("This error can also happen if the byte sequence does not "
- "match the encoding expected by the server, which is controlled "
- "by \"client_encoding\".")));
+ errhint("This error can also happen if the byte sequence does not "
+ "match the encoding expected by the server, which is controlled "
+ "by \"client_encoding\".")));
}
/*
ereport(ERROR,
(errcode(ERRCODE_UNTRANSLATABLE_CHARACTER),
- errmsg("character 0x%s of encoding \"%s\" has no equivalent in \"%s\"",
- buf,
- pg_enc2name_tbl[src_encoding].name,
- pg_enc2name_tbl[dest_encoding].name)));
+ errmsg("character 0x%s of encoding \"%s\" has no equivalent in \"%s\"",
+ buf,
+ pg_enc2name_tbl[src_encoding].name,
+ pg_enc2name_tbl[dest_encoding].name)));
}
#endif
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.353 2006/10/03 21:11:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.354 2006/10/04 00:30:03 momjian Exp $
*
*--------------------------------------------------------------------
*/
static char *log_statement_str;
static char *log_min_error_statement_str;
static char *log_destination_string;
+
#ifdef HAVE_SYSLOG
static char *syslog_facility_str;
static char *syslog_ident_str;
static char *regex_flavor_string;
static char *server_encoding_string;
static char *server_version_string;
-static int server_version_num;
+static int server_version_num;
static char *timezone_string;
static char *timezone_abbreviations_string;
static char *XactIsoLevel_string;
{
{"allow_system_table_mods", PGC_POSTMASTER, DEVELOPER_OPTIONS,
- gettext_noop("Allows modifications of the structure of system tables."),
- NULL,
- GUC_NOT_IN_SAMPLE
+ gettext_noop("Allows modifications of the structure of system tables."),
+ NULL,
+ GUC_NOT_IN_SAMPLE
},
&allowSystemTableMods,
false, NULL, NULL
{
{"ignore_system_indexes", PGC_BACKEND, DEVELOPER_OPTIONS,
- gettext_noop("Disables reading from system indexes."),
- gettext_noop("It does not prevent updating the indexes, so it is safe "
- "to use. The worst consequence is slowness."),
- GUC_NOT_IN_SAMPLE
+ gettext_noop("Disables reading from system indexes."),
+ gettext_noop("It does not prevent updating the indexes, so it is safe "
+ "to use. The worst consequence is slowness."),
+ GUC_NOT_IN_SAMPLE
},
&IgnoreSystemIndexes,
false, NULL, NULL
{
{
{"archive_timeout", PGC_SIGHUP, WAL_SETTINGS,
- gettext_noop("Forces a switch to the next xlog file if a "
- "new file has not been started within N seconds."),
- NULL,
- GUC_UNIT_S
+ gettext_noop("Forces a switch to the next xlog file if a "
+ "new file has not been started within N seconds."),
+ NULL,
+ GUC_UNIT_S
},
&XLogArchiveTimeout,
0, 0, INT_MAX, NULL, NULL
},
{
{"post_auth_delay", PGC_BACKEND, DEVELOPER_OPTIONS,
- gettext_noop("Waits N seconds on connection startup after authentication."),
- gettext_noop("This allows attaching a debugger to the process."),
- GUC_NOT_IN_SAMPLE | GUC_UNIT_S
+ gettext_noop("Waits N seconds on connection startup after authentication."),
+ gettext_noop("This allows attaching a debugger to the process."),
+ GUC_NOT_IN_SAMPLE | GUC_UNIT_S
},
&PostAuthDelay,
0, 0, INT_MAX, NULL, NULL
{"default_tablespace", PGC_USERSET, CLIENT_CONN_STATEMENT,
gettext_noop("Sets the default tablespace to create tables and indexes in."),
gettext_noop("An empty string selects the database's default tablespace."),
- GUC_IS_NAME
+ GUC_IS_NAME
},
&default_tablespace,
"", assign_default_tablespace, NULL
static void ShowGUCConfigOption(const char *name, DestReceiver *dest);
static void ShowAllGUCConfig(DestReceiver *dest);
static char *_ShowOption(struct config_generic * record, bool use_units);
-static bool is_newvalue_equal(struct config_generic *record, const char *newvalue);
+static bool is_newvalue_equal(struct config_generic * record, const char *newvalue);
/*
if ((flags & GUC_UNIT_MEMORY) && endptr != value)
{
- bool used = false;
+ bool used = false;
while (*endptr == ' ')
endptr++;
switch (flags & GUC_UNIT_MEMORY)
{
case GUC_UNIT_BLOCKS:
- val /= (BLCKSZ/1024);
+ val /= (BLCKSZ / 1024);
break;
case GUC_UNIT_XBLOCKS:
- val /= (XLOG_BLCKSZ/1024);
+ val /= (XLOG_BLCKSZ / 1024);
break;
}
}
if ((flags & GUC_UNIT_TIME) && endptr != value)
{
- bool used = false;
+ bool used = false;
while (*endptr == ' ')
endptr++;
{
switch (flags & GUC_UNIT_TIME)
{
- case GUC_UNIT_S:
- val /= MS_PER_S;
- break;
- case GUC_UNIT_MIN:
- val /= MS_PER_MIN;
- break;
+ case GUC_UNIT_S:
+ val /= MS_PER_S;
+ break;
+ case GUC_UNIT_MIN:
+ val /= MS_PER_MIN;
+ break;
}
}
}
newval = guc_strdup(elevel, value);
if (newval == NULL)
return false;
+
/*
- * The only sort of "parsing" check we need to do is
- * apply truncation if GUC_IS_NAME.
+ * The only sort of "parsing" check we need to do is apply
+ * truncation if GUC_IS_NAME.
*/
if (conf->gen.flags & GUC_IS_NAME)
truncate_identifier(newval, strlen(newval), true);
values[2] = "kB";
break;
case GUC_UNIT_BLOCKS:
- snprintf(buf, sizeof(buf), "%dkB", BLCKSZ/1024);
+ snprintf(buf, sizeof(buf), "%dkB", BLCKSZ / 1024);
values[2] = buf;
break;
case GUC_UNIT_XBLOCKS:
- snprintf(buf, sizeof(buf), "%dkB", XLOG_BLCKSZ/1024);
+ snprintf(buf, sizeof(buf), "%dkB", XLOG_BLCKSZ / 1024);
values[2] = buf;
break;
case GUC_UNIT_MS:
val = (*conf->show_hook) ();
else
{
- char unit[4];
- int result = *conf->variable;
+ char unit[4];
+ int result = *conf->variable;
if (use_units && result > 0 && (record->flags & GUC_UNIT_MEMORY))
{
switch (record->flags & GUC_UNIT_MEMORY)
{
case GUC_UNIT_BLOCKS:
- result *= BLCKSZ/1024;
+ result *= BLCKSZ / 1024;
break;
case GUC_UNIT_XBLOCKS:
- result *= XLOG_BLCKSZ/1024;
+ result *= XLOG_BLCKSZ / 1024;
break;
}
strcpy(unit, "");
snprintf(buffer, sizeof(buffer), "%d%s",
- (int)result, unit);
+ (int) result, unit);
val = buffer;
}
}
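
The reindented unit-handling hunks above scale GUC memory settings between kB and the variable's native block size: parse_int() divides a kB value by BLCKSZ/1024 (or XLOG_BLCKSZ/1024) on input, and _ShowOption() multiplies back up for display. A minimal self-contained sketch of that arithmetic, not part of the patch, using an assumed 8 kB block size in place of the real BLCKSZ macro:

#include <stdio.h>

/*
 * Illustrative only: the kB <-> blocks scaling used by the GUC unit code
 * above, with MY_BLCKSZ standing in for the real BLCKSZ.
 */
#define MY_BLCKSZ 8192          /* assumed block size in bytes */

static int
kb_to_blocks(int kb)
{
    return kb / (MY_BLCKSZ / 1024);     /* parse direction: 1024 kB -> 128 */
}

static int
blocks_to_kb(int blocks)
{
    return blocks * (MY_BLCKSZ / 1024); /* display direction: 128 -> 1024 kB */
}

int
main(void)
{
    printf("%d %d\n", kb_to_blocks(1024), blocks_to_kb(128));   /* 128 1024 */
    return 0;
}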
static bool
-is_newvalue_equal(struct config_generic *record, const char *newvalue)
+is_newvalue_equal(struct config_generic * record, const char *newvalue)
{
switch (record->vartype)
{
case PGC_BOOL:
- {
- struct config_bool *conf = (struct config_bool *) record;
- bool newval;
+ {
+ struct config_bool *conf = (struct config_bool *) record;
+ bool newval;
- return parse_bool(newvalue, &newval) && *conf->variable == newval;
- }
+ return parse_bool(newvalue, &newval) && *conf->variable == newval;
+ }
case PGC_INT:
- {
- struct config_int *conf = (struct config_int *) record;
- int newval;
+ {
+ struct config_int *conf = (struct config_int *) record;
+ int newval;
- return parse_int(newvalue, &newval, record->flags) && *conf->variable == newval;
- }
+ return parse_int(newvalue, &newval, record->flags) && *conf->variable == newval;
+ }
case PGC_REAL:
- {
- struct config_real *conf = (struct config_real *) record;
- double newval;
+ {
+ struct config_real *conf = (struct config_real *) record;
+ double newval;
- return parse_real(newvalue, &newval) && *conf->variable == newval;
- }
+ return parse_real(newvalue, &newval) && *conf->variable == newval;
+ }
case PGC_STRING:
- {
- struct config_string *conf = (struct config_string *) record;
+ {
+ struct config_string *conf = (struct config_string *) record;
- return strcmp(*conf->variable, newvalue) == 0;
- }
+ return strcmp(*conf->variable, newvalue) == 0;
+ }
}
return false;
if (newval)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("assertion checking is not supported by this build")));
+ errmsg("assertion checking is not supported by this build")));
#endif
return true;
}
assign_backslash_quote(const char *newval, bool doit, GucSource source)
{
BackslashQuoteType bq;
- bool bqbool;
+ bool bqbool;
/*
- * Although only "on", "off", and "safe_encoding" are documented,
- * we use parse_bool so we can accept all the likely variants of
- * "on" and "off".
+ * Although only "on", "off", and "safe_encoding" are documented, we use
+ * parse_bool so we can accept all the likely variants of "on" and "off".
*/
if (pg_strcasecmp(newval, "safe_encoding") == 0)
bq = BACKSLASH_QUOTE_SAFE_ENCODING;
* The powerup value shown above for timezone_abbreviations is "UNKNOWN".
* When we see this we just do nothing. If this value isn't overridden
* from the config file then pg_timezone_abbrev_initialize() will
- * eventually replace it with "Default". This hack has two purposes:
- * to avoid wasting cycles loading values that might soon be overridden
- * from the config file, and to avoid trying to read the timezone abbrev
- * files during InitializeGUCOptions(). The latter doesn't work in an
- * EXEC_BACKEND subprocess because my_exec_path hasn't been set yet and
- * so we can't locate PGSHAREDIR. (Essentially the same hack is used
- * to delay initializing TimeZone ... if we have any more, we should
- * try to clean up and centralize this mechanism ...)
+ * eventually replace it with "Default". This hack has two purposes: to
+ * avoid wasting cycles loading values that might soon be overridden from
+ * the config file, and to avoid trying to read the timezone abbrev files
+ * during InitializeGUCOptions(). The latter doesn't work in an
+ * EXEC_BACKEND subprocess because my_exec_path hasn't been set yet and so
+ * we can't locate PGSHAREDIR. (Essentially the same hack is used to
+ * delay initializing TimeZone ... if we have any more, we should try to
+ * clean up and centralize this mechanism ...)
*/
if (strcmp(newval, "UNKNOWN") == 0)
{
if (timezone_abbreviations_string == NULL ||
strcmp(timezone_abbreviations_string, newval) != 0)
{
- int elevel;
+ int elevel;
/*
* If reading config file, only the postmaster should bleat loudly
- * about problems. Otherwise, it's just this one process doing it,
+ * about problems. Otherwise, it's just this one process doing it,
* and we use WARNING message level.
*/
if (source == PGC_S_FILE)
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.32 2006/09/27 18:40:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.33 2006/10/04 00:30:04 momjian Exp $
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
* various details abducted from various places
#include "utils/ps_status.h"
extern char **environ;
-bool update_process_title = true;
+bool update_process_title = true;
/*
if (!force && !update_process_title)
return;
-
+
#ifndef PS_USE_NONE
/* no ps display for stand-alone backend */
if (!IsUnderPostmaster)
#ifdef PS_USE_WIN32
{
/*
- * Win32 does not support showing any changed arguments. To make it
- * at all possible to track which backend is doing what, we create a
+ * Win32 does not support showing any changed arguments. To make it at
+ * all possible to track which backend is doing what, we create a
* named object that can be viewed with for example Process Explorer.
*/
static HANDLE ident_handle = INVALID_HANDLE_VALUE;
ident_handle = CreateEvent(NULL, TRUE, FALSE, name);
}
#endif /* PS_USE_WIN32 */
-
#endif /* not PS_USE_NONE */
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/tzparser.c,v 1.1 2006/07/25 03:51:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/tzparser.c,v 1.2 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool validateTzEntry(tzEntry *tzentry);
static bool splitTzLine(const char *filename, int lineno,
- char *line, tzEntry *tzentry);
-static int addToArray(tzEntry **base, int *arraysize, int n,
- tzEntry *entry, bool override);
-static int ParseTzFile(const char *filename, int depth,
- tzEntry **base, int *arraysize, int n);
+ char *line, tzEntry *tzentry);
+static int addToArray(tzEntry **base, int *arraysize, int n,
+ tzEntry *entry, bool override);
+static int ParseTzFile(const char *filename, int depth,
+ tzEntry **base, int *arraysize, int n);
/*
unsigned char *p;
/*
- * Check restrictions imposed by datetkntbl storage format (see datetime.c)
+ * Check restrictions imposed by datetkntbl storage format (see
+ * datetime.c)
*/
if (strlen(tzentry->abbrev) > TOKMAXLEN)
{
/*
* Sanity-check the offset: shouldn't exceed 14 hours
*/
- if (tzentry->offset > 14*60*60 ||
- tzentry->offset < -14*60*60)
+ if (tzentry->offset > 14 * 60 * 60 ||
+ tzentry->offset < -14 * 60 * 60)
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
static bool
splitTzLine(const char *filename, int lineno, char *line, tzEntry *tzentry)
{
- char *abbrev;
- char *offset;
- char *offset_endptr;
- char *remain;
- char *is_dst;
+ char *abbrev;
+ char *offset;
+ char *offset_endptr;
+ char *remain;
+ char *is_dst;
tzentry->lineno = lineno;
tzentry->filename = filename;
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("missing time zone offset in time zone file \"%s\", line %d",
- filename, lineno)));
+ errmsg("missing time zone offset in time zone file \"%s\", line %d",
+ filename, lineno)));
return false;
}
tzentry->offset = strtol(offset, &offset_endptr, 10);
remain = is_dst;
}
- if (!remain) /* no more non-whitespace chars */
+ if (!remain) /* no more non-whitespace chars */
return true;
if (remain[0] != '#') /* must be a comment */
addToArray(tzEntry **base, int *arraysize, int n,
tzEntry *entry, bool override)
{
- tzEntry* arrayptr;
+ tzEntry *arrayptr;
int low;
int high;
/*
- * Search the array for a duplicate; as a useful side effect, the array
- * is maintained in sorted order. We use strcmp() to ensure we match
- * the sort order datetime.c expects.
+ * Search the array for a duplicate; as a useful side effect, the array is
+ * maintained in sorted order. We use strcmp() to ensure we match the
+ * sort order datetime.c expects.
*/
arrayptr = *base;
low = 0;
- high = n-1;
+ high = n - 1;
while (low <= high)
{
- int mid = (low + high) >> 1;
- tzEntry *midptr = arrayptr + mid;
- int cmp;
+ int mid = (low + high) >> 1;
+ tzEntry *midptr = arrayptr + mid;
+ int cmp;
cmp = strcmp(entry->abbrev, midptr->abbrev);
if (cmp < 0)
/*
* Found a duplicate entry; complain unless it's the same.
*/
- if (midptr->offset == entry->offset &&
+ if (midptr->offset == entry->offset &&
midptr->is_dst == entry->is_dst)
{
/* return unchanged array */
/* same abbrev but something is different, complain */
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone abbreviation \"%s\" is multiply defined",
- entry->abbrev),
+ errmsg("time zone abbreviation \"%s\" is multiply defined",
+ entry->abbrev),
errdetail("Time zone file \"%s\", line %d conflicts with file \"%s\", line %d.",
midptr->filename, midptr->lineno,
entry->filename, entry->lineno)));
/* Must dup the abbrev to ensure it survives */
arrayptr->abbrev = pstrdup(entry->abbrev);
- return n+1;
+ return n + 1;
}
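
addToArray() above uses a binary search both to detect duplicate abbreviations and, as a useful side effect, to keep the array in strcmp() order. A compact sketch of the same sorted-insert-with-duplicate-check idea, independent of the tzEntry structures; the function name is invented and capacity checks are left to the caller in this sketch:

#include <string.h>

/*
 * Illustrative only: insert key into arr (kept sorted ascending by strcmp),
 * mirroring the binary search in addToArray() above.  Returns the new
 * element count, or n unchanged if key is already present.
 */
static int
insert_sorted(char **arr, int n, char *key)
{
    int     low = 0,
            high = n - 1;

    while (low <= high)
    {
        int     mid = (low + high) >> 1;
        int     cmp = strcmp(key, arr[mid]);

        if (cmp < 0)
            high = mid - 1;
        else if (cmp > 0)
            low = mid + 1;
        else
            return n;           /* duplicate: leave array unchanged */
    }
    /* low is now the insertion point; shift the tail up by one slot */
    memmove(arr + low + 1, arr + low, (n - low) * sizeof(char *));
    arr[low] = key;
    return n + 1;
}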
/*
ParseTzFile(const char *filename, int depth,
tzEntry **base, int *arraysize, int n)
{
- char share_path[MAXPGPATH];
- char file_path[MAXPGPATH];
- FILE *tzFile;
- char tzbuf[1024];
- char *line;
- tzEntry tzentry;
- int lineno = 0;
- bool override = false;
- const char *p;
+ char share_path[MAXPGPATH];
+ char file_path[MAXPGPATH];
+ FILE *tzFile;
+ char tzbuf[1024];
+ char *line;
+ tzEntry tzentry;
+ int lineno = 0;
+ bool override = false;
+ const char *p;
/*
* We enforce that the filename is all alpha characters. This may be
if (depth > 0)
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid time zone file name \"%s\"",
- filename)));
+ errmsg("invalid time zone file name \"%s\"",
+ filename)));
return -1;
}
}
/*
- * The maximal recursion depth is a pretty arbitrary setting.
- * It is hard to imagine that someone needs more than 3 levels so stick
- * with this conservative setting until someone complains.
+ * The maximal recursion depth is a pretty arbitrary setting. It is hard
+ * to imagine that someone needs more than 3 levels so stick with this
+ * conservative setting until someone complains.
*/
if (depth > 3)
{
ereport(tz_elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("time zone file recursion limit exceeded in file \"%s\"",
- filename)));
+ errmsg("time zone file recursion limit exceeded in file \"%s\"",
+ filename)));
return -1;
}
/* else we're at EOF after all */
break;
}
- if (strlen(tzbuf) == sizeof(tzbuf)-1)
+ if (strlen(tzbuf) == sizeof(tzbuf) - 1)
{
/* the line is too long for tzbuf */
ereport(tz_elevel,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("line is too long in time zone file \"%s\", line %d",
- filename, lineno)));
+ errmsg("line is too long in time zone file \"%s\", line %d",
+ filename, lineno)));
return -1;
}
while (*line && isspace((unsigned char) *line))
line++;
- if (*line == '\0') /* empty line */
+ if (*line == '\0') /* empty line */
continue;
- if (*line == '#') /* comment line */
+ if (*line == '#') /* comment line */
continue;
if (pg_strncasecmp(line, "@INCLUDE", strlen("@INCLUDE")) == 0)
{
/* pstrdup so we can use filename in result data structure */
- char* includeFile = pstrdup(line + strlen("@INCLUDE"));
+ char *includeFile = pstrdup(line + strlen("@INCLUDE"));
includeFile = strtok(includeFile, WHITESPACE);
if (!includeFile || !*includeFile)
{
MemoryContext tmpContext;
MemoryContext oldContext;
- tzEntry *array;
+ tzEntry *array;
int arraysize;
int n;
tz_elevel = elevel;
/*
- * Create a temp memory context to work in. This makes it easy to
- * clean up afterwards.
+ * Create a temp memory context to work in. This makes it easy to clean
+ * up afterwards.
*/
tmpContext = AllocSetContextCreate(CurrentMemoryContext,
"TZParserMemory",
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.67 2006/06/28 22:05:37 neilc Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.68 2006/10/04 00:30:04 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
if (oldsize > ALLOC_CHUNK_LIMIT)
{
/*
- * The chunk must have been allocated as a single-chunk block. Find
+ * The chunk must have been allocated as a single-chunk block. Find
* the containing block and use realloc() to make it bigger with
* minimum space wastage.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.95 2006/09/27 18:40:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.96 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
PortalHashEnt *hentry; bool found; \
\
hentry = (PortalHashEnt *) hash_search(PortalHashTable, \
- (NAME), HASH_ENTER, &found); \
+ (NAME), HASH_ENTER, &found); \
if (found) \
elog(ERROR, "duplicate portal name"); \
hentry->portal = PORTAL; \
* Get the "primary" Query within a portal, ie, the one marked canSetTag.
*
* Returns NULL if no such Query. If multiple Query structs within the
- * portal are marked canSetTag, returns the first one. Neither of these
+ * portal are marked canSetTag, returns the first one. Neither of these
* cases should occur in present usages of this function.
*
* Note: the reason this is just handed a List is so that prepared statements
- * can share the code. For use with a portal, use PortalGetPrimaryQuery
+ * can share the code. For use with a portal, use PortalGetPrimaryQuery
* rather than calling this directly.
*/
Query *
Datum
pg_cursor(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- HASH_SEQ_STATUS *hash_seq;
- PortalHashEnt *hentry;
+ FuncCallContext *funcctx;
+ HASH_SEQ_STATUS *hash_seq;
+ PortalHashEnt *hentry;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- MemoryContext oldcontext;
- TupleDesc tupdesc;
+ MemoryContext oldcontext;
+ TupleDesc tupdesc;
/* create a function context for cross-call persistence */
funcctx = SRF_FIRSTCALL_INIT();
/*
- * switch to memory context appropriate for multiple function
- * calls
+ * switch to memory context appropriate for multiple function calls
*/
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx->user_fctx = NULL;
/*
- * build tupdesc for result tuples. This must match the
- * definition of the pg_cursors view in system_views.sql
+ * build tupdesc for result tuples. This must match the definition of
+ * the pg_cursors view in system_views.sql
*/
tupdesc = CreateTemplateTupleDesc(6, false);
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "name",
nulls[1] = true;
else
values[1] = DirectFunctionCall1(textin,
- CStringGetDatum(portal->sourceText));
+ CStringGetDatum(portal->sourceText));
values[2] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_HOLD);
values[3] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_BINARY);
values[4] = BoolGetDatum(portal->cursorOptions & CURSOR_OPT_SCROLL);
SRF_RETURN_DONE(funcctx);
}
-
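
pg_cursor() above follows the standard set-returning-function protocol: first-call setup under SRF_IS_FIRSTCALL(), cross-call allocations in multi_call_memory_ctx, then one row per call until SRF_RETURN_DONE. A minimal sketch of that protocol, not part of the patch; the function name and the values it returns (the integers 1..10) are invented for illustration:

#include "postgres.h"
#include "fmgr.h"
#include "funcapi.h"

PG_MODULE_MAGIC;                /* needed when built as a loadable module */

PG_FUNCTION_INFO_V1(count_to_ten);

Datum
count_to_ten(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;

        funcctx = SRF_FIRSTCALL_INIT();

        /* anything that must survive across calls is allocated here */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
        funcctx->max_calls = 10;
        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();

    if (funcctx->call_cntr < funcctx->max_calls)
        SRF_RETURN_NEXT(funcctx,
                        Int32GetDatum((int32) (funcctx->call_cntr + 1)));

    SRF_RETURN_DONE(funcctx);
}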
* To support the above policy of writing to the lowest free block,
* ltsGetFreeBlock sorts the list of free block numbers into decreasing
* order each time it is asked for a block and the list isn't currently
- * sorted. This is an efficient way to handle it because we expect cycles
+ * sorted. This is an efficient way to handle it because we expect cycles
* of releasing many blocks followed by re-using many blocks, due to
* tuplesort.c's "preread" behavior.
*
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.21 2006/03/07 23:46:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/logtape.c,v 1.22 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If blocksSorted is true then the block numbers in freeBlocks are in
* *decreasing* order, so that removing the last entry gives us the lowest
- * free block. We re-sort the blocks whenever a block is demanded; this
+ * free block. We re-sort the blocks whenever a block is demanded; this
* should be reasonably efficient given the expected usage pattern.
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
* is of length nTapes.
*/
int nTapes; /* # of logical tapes in set */
- LogicalTape tapes[1]; /* must be last in struct! */
+ LogicalTape tapes[1]; /* must be last in struct! */
};
static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
}
/*
- * Add blocknum to array, and mark the array unsorted if it's no longer
- * in decreasing order.
+ * Add blocknum to array, and mark the array unsorted if it's no longer in
+ * decreasing order.
*/
ndx = lts->nFreeBlocks++;
lts->freeBlocks[ndx] = blocknum;
- if (ndx > 0 && lts->freeBlocks[ndx-1] < blocknum)
+ if (ndx > 0 && lts->freeBlocks[ndx - 1] < blocknum)
lts->blocksSorted = false;
}
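
ltsReleaseBlock() above appends the freed block and only clears blocksSorted when the append breaks the decreasing order; ltsGetFreeBlock() then re-sorts lazily and pops the last (lowest-numbered) entry. A minimal sketch of that lazy-sort pattern, not the file's actual code; the fixed-size array, the -1 sentinel, and the qsort() comparator are simplifications of this sketch:

#include <stdlib.h>

/* Illustrative state, loosely modeled on the free-block list above. */
static long freeBlocks[1024];
static int  nFreeBlocks = 0;
static int  blocksSorted = 1;   /* is the array in *decreasing* order? */

static int
cmp_desc(const void *a, const void *b)
{
    long    la = *(const long *) a;
    long    lb = *(const long *) b;

    return (la < lb) ? 1 : (la > lb) ? -1 : 0;
}

static void
release_block(long blocknum)    /* capacity checks omitted in this sketch */
{
    int     ndx = nFreeBlocks++;

    freeBlocks[ndx] = blocknum;
    if (ndx > 0 && freeBlocks[ndx - 1] < blocknum)
        blocksSorted = 0;       /* defer the sort until a block is needed */
}

static long
get_free_block(void)
{
    if (nFreeBlocks == 0)
        return -1;              /* caller would extend the file instead */
    if (!blocksSorted)
    {
        qsort(freeBlocks, nFreeBlocks, sizeof(long), cmp_desc);
        blocksSorted = 1;
    }
    return freeBlocks[--nFreeBlocks];   /* last entry == lowest block */
}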
int i;
/*
- * Create top-level struct including per-tape LogicalTape structs.
- * First LogicalTape struct is already counted in sizeof(LogicalTapeSet).
+ * Create top-level struct including per-tape LogicalTape structs. First
+ * LogicalTape struct is already counted in sizeof(LogicalTapeSet).
*/
Assert(ntapes > 0);
lts = (LogicalTapeSet *) palloc(sizeof(LogicalTapeSet) +
- (ntapes - 1) * sizeof(LogicalTape));
+ (ntapes - 1) *sizeof(LogicalTape));
lts->pfile = BufFileCreateTemp(false);
lts->nFileBlocks = 0L;
lts->forgetFreeSpace = false;
/*
* Initialize per-tape structs. Note we allocate the I/O buffer and
* first-level indirect block for a tape only when it is first actually
- * written to. This avoids wasting memory space when tuplesort.c
+ * written to. This avoids wasting memory space when tuplesort.c
* overestimates the number of tapes needed.
*/
for (i = 0; i < ntapes; i++)
* Mark a logical tape set as not needing management of free space anymore.
*
* This should be called if the caller does not intend to write any more data
- * into the tape set, but is reading from un-frozen tapes. Since no more
+ * into the tape set, but is reading from un-frozen tapes. Since no more
* writes are planned, remembering free blocks is no longer useful. Setting
* this flag lets us avoid wasting time and space in ltsReleaseBlock(), which
* is not designed to handle large numbers of free blocks.
* grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
* to Knuth's figure 70 (section 5.4.2). However, Knuth is assuming that
* tape drives are expensive beasts, and in particular that there will always
- * be many more runs than tape drives. In our implementation a "tape drive"
+ * be many more runs than tape drives. In our implementation a "tape drive"
* doesn't cost much more than a few Kb of memory buffers, so we can afford
* to have lots of them. In particular, if we can have as many tape drives
* as sorted runs, we can eliminate any repeated I/O at all. In the current
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.69 2006/10/03 22:18:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.70 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * The objects we actually sort are SortTuple structs. These contain
+ * The objects we actually sort are SortTuple structs. These contain
* a pointer to the tuple proper (might be a MinimalTuple or IndexTuple),
* which is a separate palloc chunk --- we assume it is just one chunk and
* can be freed by a simple pfree(). SortTuples also contain the tuple's
* first key column in Datum/nullflag format, and an index integer.
*
* Storing the first key column lets us save heap_getattr or index_getattr
- * calls during tuple comparisons. We could extract and save all the key
+ * calls during tuple comparisons. We could extract and save all the key
* columns not just the first, but this would increase code complexity and
* overhead, and wouldn't actually save any comparison cycles in the common
* case where the first key determines the comparison result. Note that
* for a pass-by-reference datatype, datum1 points into the "tuple" storage.
*
* When sorting single Datums, the data value is represented directly by
- * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
+ * datum1/isnull1. If the datatype is pass-by-reference and isnull1 is false,
* then datum1 points to a separately palloc'd data value that is also pointed
* to by the "tuple" pointer; otherwise "tuple" is NULL.
*
* While building initial runs, tupindex holds the tuple's run number. During
* merge passes, we re-use it to hold the input tape number that each tuple in
* the heap was read from, or to hold the index of the next tuple pre-read
- * from the same tape in the case of pre-read entries. tupindex goes unused
+ * from the same tape in the case of pre-read entries. tupindex goes unused
* if the sort occurs entirely in memory.
*/
typedef struct
* qsort_arg_comparator.
*/
int (*comparetup) (const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
/*
* Function to copy a supplied input tuple into palloc'd space and set up
* state->availMem by the amount of memory space thereby released.
*/
void (*writetup) (Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
/*
* Function to read a stored tuple from tape back into memory. 'len' is
* the already-read length of the stored tuple. Create a palloc'd copy,
- * initialize tuple/datum1/isnull1 in the target SortTuple struct,
- * and decrease state->availMem by the amount of memory space consumed.
+ * initialize tuple/datum1/isnull1 in the target SortTuple struct, and
+ * decrease state->availMem by the amount of memory space consumed.
*/
void (*readtup) (Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
/*
- * This array holds the tuples now in sort memory. If we are in state
+ * This array holds the tuples now in sort memory. If we are in state
* INITIAL, the tuples are in no particular order; if we are in state
* SORTEDINMEM, the tuples are in final sorted order; in states BUILDRUNS
* and FINALMERGE, the tuples are organized in "heap" order per Algorithm
int currentRun;
/*
- * Unless otherwise noted, all pointer variables below are pointers
- * to arrays of length maxTapes, holding per-tape data.
+ * Unless otherwise noted, all pointer variables below are pointers to
+ * arrays of length maxTapes, holding per-tape data.
*/
/*
int *mergeavailslots; /* slots left for prereading each tape */
long *mergeavailmem; /* availMem for prereading each tape */
int mergefreelist; /* head of freelist of recycled slots */
- int mergefirstfree; /* first slot never used in this merge */
+ int mergefirstfree; /* first slot never used in this merge */
/*
* Variables for Algorithm D. Note that destTape is a "logical" tape
* tuplesort_begin_heap and used only by the MinimalTuple routines.
*/
TupleDesc tupDesc;
- ScanKey scanKeys; /* array of length nKeys */
- SortFunctionKind *sortFnKinds; /* array of length nKeys */
+ ScanKey scanKeys; /* array of length nKeys */
+ SortFunctionKind *sortFnKinds; /* array of length nKeys */
/*
* These variables are specific to the IndexTuple case; they are set by
};
#define COMPARETUP(state,a,b) ((*(state)->comparetup) (a, b, state))
-#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
+#define COPYTUP(state,stup,tup) ((*(state)->copytup) (state, stup, tup))
#define WRITETUP(state,tape,stup) ((*(state)->writetup) (state, tape, stup))
#define READTUP(state,stup,tape,len) ((*(state)->readtup) (state, stup, tape, len))
#define LACKMEM(state) ((state)->availMem < 0)
static unsigned int getlen(Tuplesortstate *state, int tapenum, bool eofOK);
static void markrunend(Tuplesortstate *state, int tapenum);
static int comparetup_heap(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_heap(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_heap(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_heap(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
static int comparetup_index(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_index(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_index(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_index(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
static int comparetup_datum(const SortTuple *a, const SortTuple *b,
- Tuplesortstate *state);
+ Tuplesortstate *state);
static void copytup_datum(Tuplesortstate *state, SortTuple *stup, void *tup);
static void writetup_datum(Tuplesortstate *state, int tapenum,
- SortTuple *stup);
+ SortTuple *stup);
static void readtup_datum(Tuplesortstate *state, SortTuple *stup,
- int tapenum, unsigned int len);
+ int tapenum, unsigned int len);
/*
MemoryContext oldcontext;
/*
- * Create a working memory context for this sort operation.
- * All data needed by the sort will live inside this context.
+ * Create a working memory context for this sort operation. All data
+ * needed by the sort will live inside this context.
*/
sortcontext = AllocSetContextCreate(CurrentMemoryContext,
"TupleSort",
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Make the Tuplesortstate within the per-sort context. This way,
- * we don't need a separate pfree() operation for it at shutdown.
+ * Make the Tuplesortstate within the per-sort context. This way, we
+ * don't need a separate pfree() operation for it at shutdown.
*/
oldcontext = MemoryContextSwitchTo(sortcontext);
/*
* Delete temporary "tape" files, if any.
*
- * Note: want to include this in reported total cost of sort, hence
- * need for two #ifdef TRACE_SORT sections.
+ * Note: want to include this in reported total cost of sort, hence need
+ * for two #ifdef TRACE_SORT sections.
*/
if (state->tapeset)
LogicalTapeSetClose(state->tapeset);
MemoryContextSwitchTo(oldcontext);
/*
- * Free the per-sort memory context, thereby releasing all working
- * memory, including the Tuplesortstate struct itself.
+ * Free the per-sort memory context, thereby releasing all working memory,
+ * including the Tuplesortstate struct itself.
*/
MemoryContextDelete(state->sortcontext);
}
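
The rewrapped comments above describe tuplesort's memory discipline: every allocation for one sort, including the Tuplesortstate struct itself, lives in a dedicated child context, so the teardown hunk just above can release everything with a single MemoryContextDelete(). A sketch of that per-operation context pattern; the MySortState struct and function names are invented for illustration, while the memory-context calls are the ordinary backend ones:

#include "postgres.h"
#include "utils/memutils.h"

typedef struct MySortState
{
    MemoryContext cxt;          /* all of this state's allocations live here */
    /* ... whatever working data the operation needs ... */
} MySortState;

static MySortState *
mystate_begin(void)
{
    MemoryContext cxt;
    MemoryContext oldcxt;
    MySortState *state;

    cxt = AllocSetContextCreate(CurrentMemoryContext,
                                "MySortState",
                                ALLOCSET_DEFAULT_MINSIZE,
                                ALLOCSET_DEFAULT_INITSIZE,
                                ALLOCSET_DEFAULT_MAXSIZE);

    /* allocate the state struct itself inside its own context ... */
    oldcxt = MemoryContextSwitchTo(cxt);
    state = (MySortState *) palloc0(sizeof(MySortState));
    state->cxt = cxt;
    MemoryContextSwitchTo(oldcxt);

    return state;
}

static void
mystate_end(MySortState *state)
{
    /* ... so one delete releases the working memory and the struct */
    MemoryContextDelete(state->cxt);
}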
{
/*
* We need to be sure that we do not cause LACKMEM to become true, else
- * the space management algorithm will go nuts. We assume here that
- * the memory chunk overhead associated with the memtuples array is
- * constant and so there will be no unexpected addition to what we ask
- * for. (The minimum array size established in tuplesort_begin_common
- * is large enough to force palloc to treat it as a separate chunk, so
- * this assumption should be good. But let's check it.)
+ * the space management algorithm will go nuts. We assume here that the
+ * memory chunk overhead associated with the memtuples array is constant
+ * and so there will be no unexpected addition to what we ask for. (The
+ * minimum array size established in tuplesort_begin_common is large
+ * enough to force palloc to treat it as a separate chunk, so this
+ * assumption should be good. But let's check it.)
*/
if (state->availMem <= (long) (state->memtupsize * sizeof(SortTuple)))
return false;
+
/*
* On a 64-bit machine, allowedMem could be high enough to get us into
* trouble with MaxAllocSize, too.
SortTuple stup;
/*
- * If it's a pass-by-reference value, copy it into memory we control,
- * and decrease availMem. Then call the common code.
+ * If it's a pass-by-reference value, copy it into memory we control, and
+ * decrease availMem. Then call the common code.
*/
if (isNull || state->datumTypeByVal)
{
case TSS_INITIAL:
/*
- * Save the tuple into the unsorted array. First, grow the
- * array as needed. Note that we try to grow the array when there
- * is still one free slot remaining --- if we fail, there'll still
- * be room to store the incoming tuple, and then we'll switch to
+ * Save the tuple into the unsorted array. First, grow the array
+ * as needed. Note that we try to grow the array when there is
+ * still one free slot remaining --- if we fail, there'll still be
+ * room to store the incoming tuple, and then we'll switch to
* tape-based operation.
*/
if (state->memtupcount >= state->memtupsize - 1)
case TSS_BUILDRUNS:
/*
- * Insert the tuple into the heap, with run number
- * currentRun if it can go into the current run, else run number
- * currentRun+1. The tuple can go into the current run if it is
- * >= the first not-yet-output tuple. (Actually, it could go into
- * the current run if it is >= the most recently output tuple ...
- * but that would require keeping around the tuple we last output,
- * and it's simplest to let writetup free each tuple as soon as
- * it's written.)
+ * Insert the tuple into the heap, with run number currentRun if
+ * it can go into the current run, else run number currentRun+1.
+ * The tuple can go into the current run if it is >= the first
+ * not-yet-output tuple. (Actually, it could go into the current
+ * run if it is >= the most recently output tuple ... but that
+ * would require keeping around the tuple we last output, and it's
+ * simplest to let writetup free each tuple as soon as it's
+ * written.)
*
* Note there will always be at least one tuple in the heap at
* this point; see dumptuples.
int mOrder;
/*
- * We need one tape for each merge input, plus another one for the
- * output, and each of these tapes needs buffer space. In addition
- * we want MERGE_BUFFER_SIZE workspace per input tape (but the output
- * tape doesn't count).
+ * We need one tape for each merge input, plus another one for the output,
+ * and each of these tapes needs buffer space. In addition we want
+ * MERGE_BUFFER_SIZE workspace per input tape (but the output tape doesn't
+ * count).
*
* Note: you might be thinking we need to account for the memtuples[]
- * array in this calculation, but we effectively treat that as part of
- * the MERGE_BUFFER_SIZE workspace.
+ * array in this calculation, but we effectively treat that as part of the
+ * MERGE_BUFFER_SIZE workspace.
*/
mOrder = (allowedMem - TAPE_BUFFER_OVERHEAD) /
(MERGE_BUFFER_SIZE + TAPE_BUFFER_OVERHEAD);
/*
* We must have at least 2*maxTapes slots in the memtuples[] array, else
- * we'd not have room for merge heap plus preread. It seems unlikely
- * that this case would ever occur, but be safe.
+ * we'd not have room for merge heap plus preread. It seems unlikely that
+ * this case would ever occur, but be safe.
*/
maxTapes = Min(maxTapes, state->memtupsize / 2);
/*
* Decrease availMem to reflect the space needed for tape buffers; but
- * don't decrease it to the point that we have no room for tuples.
- * (That case is only likely to occur if sorting pass-by-value Datums;
- * in all other scenarios the memtuples[] array is unlikely to occupy
- * more than half of allowedMem. In the pass-by-value case it's not
- * important to account for tuple space, so we don't care if LACKMEM
- * becomes inaccurate.)
+ * don't decrease it to the point that we have no room for tuples. (That
+ * case is only likely to occur if sorting pass-by-value Datums; in all
+ * other scenarios the memtuples[] array is unlikely to occupy more than
+ * half of allowedMem. In the pass-by-value case it's not important to
+ * account for tuple space, so we don't care if LACKMEM becomes
+ * inaccurate.)
*/
tapeSpace = maxTapes * TAPE_BUFFER_OVERHEAD;
if (tapeSpace + GetMemoryChunkSpace(state->memtuples) < state->allowedMem)
/*
* If we produced only one initial run (quite likely if the total data
* volume is between 1X and 2X workMem), we can just use that tape as the
- * finished output, rather than doing a useless merge. (This obvious
+ * finished output, rather than doing a useless merge. (This obvious
* optimization is not in Knuth's algorithm.)
*/
if (state->currentRun == 1)
memset(state->mergelast, 0,
state->maxTapes * sizeof(*state->mergelast));
state->mergefreelist = 0; /* nothing in the freelist */
- state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
+ state->mergefirstfree = activeTapes; /* 1st slot avail for preread */
/*
* Initialize space allocation to let each active input tape have an equal
/*
* Heap manipulation routines, per Knuth's Algorithm 5.2.3H.
*
- * Compare two SortTuples. If checkIndex is true, use the tuple index
+ * Compare two SortTuples. If checkIndex is true, use the tuple index
* as the front of the sort key; otherwise, no.
*/
/*
* Insert a new tuple into an empty or existing heap, maintaining the
- * heap invariant. Caller is responsible for ensuring there's room.
+ * heap invariant. Caller is responsible for ensuring there's room.
*
* Note: we assume *tuple is a temporary variable that can be scribbled on.
* For some callers, tuple actually points to a memtuples[] entry above the
int j;
/*
- * Save the tupleindex --- see notes above about writing on *tuple.
- * It's a historical artifact that tupleindex is passed as a separate
- * argument and not in *tuple, but it's notationally convenient so
- * let's leave it that way.
+ * Save the tupleindex --- see notes above about writing on *tuple. It's a
+ * historical artifact that tupleindex is passed as a separate argument
+ * and not in *tuple, but it's notationally convenient so let's leave it
+ * that way.
*/
tuple->tupindex = tupleindex;
{
/*
* This is similar to _bt_tuplecompare(), but we have already done the
- * index_getattr calls for the first column, and we need to keep track
- * of whether any null fields are present. Also see the special treatment
+ * index_getattr calls for the first column, and we need to keep track of
+ * whether any null fields are present. Also see the special treatment
* for equal keys at the end.
*/
ScanKey scanKey = state->indexScanKey;
}
else
{
- void *raddr = palloc(tuplen);
+ void *raddr = palloc(tuplen);
if (LogicalTapeRead(state->tapeset, tapenum, raddr,
tuplen) != tuplen)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.28 2006/06/27 02:51:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.29 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
switch (state->status)
{
case TSS_INMEM:
+
/*
* Grow the array as needed. Note that we try to grow the array
* when there is still one free slot remaining --- if we fail,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.97 2006/09/15 16:39:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.98 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Make a quick range check to eliminate most XIDs without looking at the
- * xip arrays. Note that this is OK even if we convert a subxact XID to
+ * xip arrays. Note that this is OK even if we convert a subxact XID to
* its parent below, because a subxact with XID < xmin has surely also got
* a parent with XID < xmin, while one with XID >= xmax must belong to a
* parent that was not yet committed at the time of this snapshot.
/*
* If the snapshot contains full subxact data, the fastest way to check
* things is just to compare the given XID against both subxact XIDs and
- * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
+ * top-level XIDs. If the snapshot overflowed, we have to use pg_subtrans
* to convert a subxact XID to its parent XID, but then we need only look
* at top-level XIDs not subxacts.
*/
xid = SubTransGetTopmostTransaction(xid);
/*
- * If xid was indeed a subxact, we might now have an xid < xmin,
- * so recheck to avoid an array scan. No point in rechecking xmax.
+ * If xid was indeed a subxact, we might now have an xid < xmin, so
+ * recheck to avoid an array scan. No point in rechecking xmax.
*/
if (TransactionIdPrecedes(xid, snapshot->xmin))
return false;
* Portions Copyright (c) 1994, Regents of the University of California
* Portions taken from FreeBSD.
*
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.123 2006/10/03 21:11:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.124 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* defaults */
static int n_connections = 10;
static int n_buffers = 50;
-static int n_fsm_pages = 20000;
+static int n_fsm_pages = 20000;
/*
* Warning messages for authentication methods
static bool chklocale(const char *locale);
static void setlocales(void);
static void usage(const char *progname);
+
#ifdef WIN32
-static int CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo);
+static int CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION * processInfo);
#endif
{
/*
* These macros define the minimum shared_buffers we want for a given
- * max_connections value, and the max_fsm_pages setting to be used for
- * a given shared_buffers value. The arrays show the settings to try.
+ * max_connections value, and the max_fsm_pages setting to be used for a
+ * given shared_buffers value. The arrays show the settings to try.
*
*/
-#define MIN_BUFS_FOR_CONNS(nconns) ((nconns) * 10 * (BLCKSZ/1024))
-#define FSM_FOR_BUFS(nbuffers) ((nbuffers) > 1000 ? 50 * (nbuffers) : 20000)
+#define MIN_BUFS_FOR_CONNS(nconns) ((nconns) * 10 * (BLCKSZ/1024))
+#define FSM_FOR_BUFS(nbuffers) ((nbuffers) > 1000 ? 50 * (nbuffers) : 20000)
static const int trial_conns[] = {
100, 50, 40, 30, 20, 10
};
/*
- * Candidate values for shared_buffers in kB. When the value is
- * divisible by 1024, we write it in MB-unit to configuration files.
+ * Candidate values for shared_buffers in kB. When the value is divisible
+ * by 1024, we write it in MB-unit to configuration files.
*/
static const int trial_bufs[] = {
32768, 28672, 24576, 20480, 16384, 12288,
};
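
The sizing macros above drive initdb's trial loop. Assuming an 8 kB BLCKSZ and buffer counts expressed in kB, as the surrounding code suggests, the numbers work out as in this small worked example, which is not part of the patch:

#include <stdio.h>

/*
 * Illustrative only: the initdb sizing macros with BLCKSZ assumed to be
 * 8192 bytes.
 */
#define BLCKSZ 8192
#define MIN_BUFS_FOR_CONNS(nconns)  ((nconns) * 10 * (BLCKSZ / 1024))
#define FSM_FOR_BUFS(nbuffers)      ((nbuffers) > 1000 ? 50 * (nbuffers) : 20000)

int
main(void)
{
    printf("%d\n", MIN_BUFS_FOR_CONNS(100));    /* 8000 kB of buffers */
    printf("%d\n", FSM_FOR_BUFS(32768));        /* 1638400 fsm pages */
    printf("%d\n", FSM_FOR_BUFS(400));          /* small: falls back to 20000 */
    return 0;
}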
char cmd[MAXPGPATH];
- const int connslen = sizeof(trial_conns) / sizeof(int);
- const int bufslen = sizeof(trial_bufs) / sizeof(int);
+ const int connslen = sizeof(trial_conns) / sizeof(int);
+ const int bufslen = sizeof(trial_bufs) / sizeof(int);
int i,
status,
test_conns,
test_buffs,
- test_max_fsm,
- ok_buffers = 0;
-
+ test_max_fsm,
+ ok_buffers = 0;
+
printf(_("selecting default max_connections ... "));
fflush(stdout);
n_fsm_pages = FSM_FOR_BUFS(n_buffers);
if (n_buffers % 1024 == 0)
- printf("%dMB/%d\n", n_buffers/1024, n_fsm_pages);
+ printf("%dMB/%d\n", n_buffers / 1024, n_fsm_pages);
else
printf("%dkB/%d\n", n_buffers, n_fsm_pages);
}
conflines = replace_token(conflines, "#max_connections = 100", repltok);
if (n_buffers % 1024 == 0)
- snprintf(repltok, sizeof(repltok), "shared_buffers = %dMB", n_buffers/1024);
+ snprintf(repltok, sizeof(repltok), "shared_buffers = %dMB", n_buffers / 1024);
else
snprintf(repltok, sizeof(repltok), "shared_buffers = %dkB", n_buffers);
conflines = replace_token(conflines, "#shared_buffers = 32MB", repltok);
escape_quotes(lc_time));
conflines = replace_token(conflines, "#lc_time = 'C'", repltok);
- switch (locale_date_order(lc_time)) {
+ switch (locale_date_order(lc_time))
+ {
case DATEORDER_YMD:
strcpy(repltok, "datestyle = 'iso, ymd'");
break;
*
* Note: this is used to process both postgresql.conf entries and SQL
* string literals. Since postgresql.conf strings are defined to treat
- * backslashes as escapes, we have to double backslashes here. Hence,
+ * backslashes as escapes, we have to double backslashes here. Hence,
* when using this for a SQL string literal, use E'' syntax.
*
* We do not need to worry about encoding considerations because all
}
#ifdef WIN32
-typedef BOOL (WINAPI *__CreateRestrictedToken)(HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
-#define DISABLE_MAX_PRIVILEGE 0x1
+typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
+
+#define DISABLE_MAX_PRIVILEGE 0x1
/*
* Create a restricted token and execute the specified process with it.
 * On NT4, or any other system not containing the required functions, it will
 * NOT execute anything.
*/
-static int
-CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo)
+static int
+CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION * processInfo)
{
- BOOL b;
- STARTUPINFO si;
- HANDLE origToken;
- HANDLE restrictedToken;
- SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
- SID_AND_ATTRIBUTES dropSids[2];
- __CreateRestrictedToken _CreateRestrictedToken = NULL;
- HANDLE Advapi32Handle;
-
- ZeroMemory(&si, sizeof(si));
- si.cb = sizeof(si);
-
- Advapi32Handle = LoadLibrary("ADVAPI32.DLL");
- if (Advapi32Handle != NULL)
- {
- _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
- }
-
- if (_CreateRestrictedToken == NULL)
- {
- fprintf(stderr,"WARNING: Unable to create restricted tokens on this platform\n");
- if (Advapi32Handle != NULL)
- FreeLibrary(Advapi32Handle);
- return 0;
- }
-
- /* Open the current token to use as a base for the restricted one */
- if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
- {
- fprintf(stderr, "Failed to open process token: %lu\n", GetLastError());
- return 0;
- }
-
- /* Allocate list of SIDs to remove */
- ZeroMemory(&dropSids, sizeof(dropSids));
- if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0,0,0,0,0,
- 0, &dropSids[0].Sid) ||
- !AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0,0,0,0,0,
- 0, &dropSids[1].Sid))
- {
- fprintf(stderr,"Failed to allocate SIDs: %lu\n", GetLastError());
- return 0;
- }
-
- b = _CreateRestrictedToken(origToken,
- DISABLE_MAX_PRIVILEGE,
- sizeof(dropSids)/sizeof(dropSids[0]),
- dropSids,
- 0, NULL,
- 0, NULL,
- &restrictedToken);
-
- FreeSid(dropSids[1].Sid);
- FreeSid(dropSids[0].Sid);
- CloseHandle(origToken);
- FreeLibrary(Advapi32Handle);
-
- if (!b)
- {
- fprintf(stderr,"Failed to create restricted token: %lu\n", GetLastError());
- return 0;
- }
-
- return CreateProcessAsUser(restrictedToken, NULL, cmd, NULL, NULL, TRUE, 0, NULL, NULL, &si, processInfo);
+ BOOL b;
+ STARTUPINFO si;
+ HANDLE origToken;
+ HANDLE restrictedToken;
+ SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
+ SID_AND_ATTRIBUTES dropSids[2];
+ __CreateRestrictedToken _CreateRestrictedToken = NULL;
+ HANDLE Advapi32Handle;
+
+ ZeroMemory(&si, sizeof(si));
+ si.cb = sizeof(si);
+
+ Advapi32Handle = LoadLibrary("ADVAPI32.DLL");
+ if (Advapi32Handle != NULL)
+ {
+ _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
+ }
+
+ if (_CreateRestrictedToken == NULL)
+ {
+ fprintf(stderr, "WARNING: Unable to create restricted tokens on this platform\n");
+ if (Advapi32Handle != NULL)
+ FreeLibrary(Advapi32Handle);
+ return 0;
+ }
+
+ /* Open the current token to use as a base for the restricted one */
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
+ {
+ fprintf(stderr, "Failed to open process token: %lu\n", GetLastError());
+ return 0;
+ }
+
+ /* Allocate list of SIDs to remove */
+ ZeroMemory(&dropSids, sizeof(dropSids));
+ if (!AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
+ 0, &dropSids[0].Sid) ||
+ !AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
+ 0, &dropSids[1].Sid))
+ {
+ fprintf(stderr, "Failed to allocate SIDs: %lu\n", GetLastError());
+ return 0;
+ }
+
+ b = _CreateRestrictedToken(origToken,
+ DISABLE_MAX_PRIVILEGE,
+ sizeof(dropSids) / sizeof(dropSids[0]),
+ dropSids,
+ 0, NULL,
+ 0, NULL,
+ &restrictedToken);
+
+ FreeSid(dropSids[1].Sid);
+ FreeSid(dropSids[0].Sid);
+ CloseHandle(origToken);
+ FreeLibrary(Advapi32Handle);
+
+ if (!b)
+ {
+ fprintf(stderr, "Failed to create restricted token: %lu\n", GetLastError());
+ return 0;
+ }
+
+ return CreateProcessAsUser(restrictedToken, NULL, cmd, NULL, NULL, TRUE, 0, NULL, NULL, &si, processInfo);
}
#endif
{"lc-messages", required_argument, NULL, 7},
{"no-locale", no_argument, NULL, 8},
{"auth", required_argument, NULL, 'A'},
- {"pwprompt", no_argument, NULL, 'W'},
+ {"pwprompt", no_argument, NULL, 'W'},
{"pwfile", required_argument, NULL, 9},
{"username", required_argument, NULL, 'U'},
{"help", no_argument, NULL, '?'},
ret;
int option_index;
char *short_version;
- char *effective_user;
+ char *effective_user;
char *pgdenv; /* PGDATA value gotten from and sent to
* environment */
char bin_dir[MAXPGPATH];
char *pg_data_native;
+
#ifdef WIN32
- char *restrict_env;
+ char *restrict_env;
#endif
static const char *subdirs[] = {
"global",
canonicalize_path(pg_data);
#ifdef WIN32
- /*
- * Before we execute another program, make sure that we are running with a
- * restricted token. If not, re-execute ourselves with one.
- */
- if ((restrict_env = getenv("PG_RESTRICT_EXEC")) == NULL
- || strcmp(restrict_env,"1") != 0)
- {
- PROCESS_INFORMATION pi;
- char *cmdline;
-
- ZeroMemory(&pi, sizeof(pi));
+ /*
+ * Before we execute another program, make sure that we are running with a
+ * restricted token. If not, re-execute ourselves with one.
+ */
- cmdline = xstrdup(GetCommandLine());
+ if ((restrict_env = getenv("PG_RESTRICT_EXEC")) == NULL
+ || strcmp(restrict_env, "1") != 0)
+ {
+ PROCESS_INFORMATION pi;
+ char *cmdline;
+
+ ZeroMemory(&pi, sizeof(pi));
+
+ cmdline = xstrdup(GetCommandLine());
putenv("PG_RESTRICT_EXEC=1");
-
- if (!CreateRestrictedProcess(cmdline, &pi))
- {
- fprintf(stderr,"Failed to re-exec with restricted token: %lu.\n", GetLastError());
- }
- else
- {
- /* Successfully re-execed. Now wait for child process to capture exitcode. */
- DWORD x;
-
- CloseHandle(pi.hThread);
- WaitForSingleObject(pi.hProcess, INFINITE);
-
- if (!GetExitCodeProcess(pi.hProcess, &x))
- {
- fprintf(stderr,"Failed to get exit code from subprocess: %lu\n", GetLastError());
- exit(1);
- }
- exit(x);
- }
- }
+
+ if (!CreateRestrictedProcess(cmdline, &pi))
+ {
+ fprintf(stderr, "Failed to re-exec with restricted token: %lu.\n", GetLastError());
+ }
+ else
+ {
+ /*
+ * Successfully re-execed. Now wait for child process to capture
+ * exitcode.
+ */
+ DWORD x;
+
+ CloseHandle(pi.hThread);
+ WaitForSingleObject(pi.hProcess, INFINITE);
+
+ if (!GetExitCodeProcess(pi.hProcess, &x))
+ {
+ fprintf(stderr, "Failed to get exit code from subprocess: %lu\n", GetLastError());
+ exit(1);
+ }
+ exit(x);
+ }
+ }
#endif
/*
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.21 2006/09/27 16:19:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.22 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *ptr;
/*
- * GetShortPathName() will fail if the path does not exist, or short names
- * are disabled on this file system. In both cases, we just return the
- * original path. This is particularly useful for --sysconfdir, which
- * might not exist.
+ * GetShortPathName() will fail if the path does not exist, or short names
+ * are disabled on this file system. In both cases, we just return the
+ * original path. This is particularly useful for --sysconfdir, which
+ * might not exist.
*/
GetShortPathName(path, path, MAXPGPATH - 1);
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.72 2006/09/24 16:59:45 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.73 2006/10/04 00:30:04 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void WINAPI pgwin32_ServiceHandler(DWORD);
static void WINAPI pgwin32_ServiceMain(DWORD, LPTSTR *);
static void pgwin32_doRunAsService(void);
-static int CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo);
+static int CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION * processInfo);
#endif
static pgpid_t get_pgpid(void);
static char **readfile(const char *path);
start_postmaster(void)
{
char cmd[MAXPGPATH];
+
#ifndef WIN32
+
/*
* Since there might be quotes to handle here, it is easier simply to pass
* everything to a shell to process them.
snprintf(cmd, MAXPGPATH, "%s\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1 &%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, log_file, SYSTEMQUOTE);
- else
+ else
snprintf(cmd, MAXPGPATH, "%s\"%s\" %s%s < \"%s\" 2>&1 &%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, SYSTEMQUOTE);
return system(cmd);
-
-#else /* WIN32 */
- /*
- * On win32 we don't use system(). So we don't need to use &
- * (which would be START /B on win32). However, we still call the shell
- * (CMD.EXE) with it to handle redirection etc.
- */
- PROCESS_INFORMATION pi;
-
- if (log_file != NULL)
- snprintf(cmd, MAXPGPATH, "CMD /C %s\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1%s",
+#else /* WIN32 */
+
+ /*
+ * On win32 we don't use system(). So we don't need to use & (which would
+ * be START /B on win32). However, we still call the shell (CMD.EXE) with
+ * it to handle redirection etc.
+ */
+ PROCESS_INFORMATION pi;
+
+ if (log_file != NULL)
+ snprintf(cmd, MAXPGPATH, "CMD /C %s\"%s\" %s%s < \"%s\" >> \"%s\" 2>&1%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, log_file, SYSTEMQUOTE);
- else
- snprintf(cmd, MAXPGPATH, "CMD /C %s\"%s\" %s%s < \"%s\" 2>&1%s",
+ else
+ snprintf(cmd, MAXPGPATH, "CMD /C %s\"%s\" %s%s < \"%s\" 2>&1%s",
SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
DEVNULL, SYSTEMQUOTE);
- if (!CreateRestrictedProcess(cmd, &pi))
- return GetLastError();
- CloseHandle(pi.hProcess);
- CloseHandle(pi.hThread);
- return 0;
-#endif /* WIN32 */
+ if (!CreateRestrictedProcess(cmd, &pi))
+ return GetLastError();
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+ return 0;
+#endif /* WIN32 */
}
 * also load the couple of functions that *do* exist in MinGW headers but not
* on NT4. That way, we don't break on NT4.
*/
-typedef BOOL (WINAPI *__CreateRestrictedToken)(HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
-typedef BOOL (WINAPI *__IsProcessInJob)(HANDLE, HANDLE, PBOOL);
-typedef HANDLE (WINAPI *__CreateJobObject)(LPSECURITY_ATTRIBUTES, LPCTSTR);
-typedef BOOL (WINAPI *__SetInformationJobObject)(HANDLE, JOBOBJECTINFOCLASS, LPVOID, DWORD);
-typedef BOOL (WINAPI *__AssignProcessToJobObject)(HANDLE, HANDLE);
-typedef BOOL (WINAPI *__QueryInformationJobObject)(HANDLE, JOBOBJECTINFOCLASS, LPVOID, DWORD, LPDWORD);
+typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE);
+typedef BOOL(WINAPI * __IsProcessInJob) (HANDLE, HANDLE, PBOOL);
+typedef HANDLE(WINAPI * __CreateJobObject) (LPSECURITY_ATTRIBUTES, LPCTSTR);
+typedef BOOL(WINAPI * __SetInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS, LPVOID, DWORD);
+typedef BOOL(WINAPI * __AssignProcessToJobObject) (HANDLE, HANDLE);
+typedef BOOL(WINAPI * __QueryInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS, LPVOID, DWORD, LPDWORD);
/* Windows API define missing from MingW headers */
-#define DISABLE_MAX_PRIVILEGE 0x1
+#define DISABLE_MAX_PRIVILEGE 0x1
/*
* Create a restricted token, a job object sandbox, and execute the specified
* automatically destroyed when pg_ctl exits.
*/
static int
-CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo)
+CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION * processInfo)
{
- int r;
- BOOL b;
- STARTUPINFO si;
- HANDLE origToken;
- HANDLE restrictedToken;
- SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
- SID_AND_ATTRIBUTES dropSids[2];
-
- /* Functions loaded dynamically */
- __CreateRestrictedToken _CreateRestrictedToken = NULL;
- __IsProcessInJob _IsProcessInJob = NULL;
- __CreateJobObject _CreateJobObject = NULL;
- __SetInformationJobObject _SetInformationJobObject = NULL;
- __AssignProcessToJobObject _AssignProcessToJobObject = NULL;
- __QueryInformationJobObject _QueryInformationJobObject = NULL;
- HANDLE Kernel32Handle;
- HANDLE Advapi32Handle;
-
- ZeroMemory(&si, sizeof(si));
- si.cb = sizeof(si);
-
- Advapi32Handle = LoadLibrary("ADVAPI32.DLL");
- if (Advapi32Handle != NULL)
- {
- _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
- }
-
- if (_CreateRestrictedToken == NULL)
- {
- /* NT4 doesn't have CreateRestrictedToken, so just call ordinary CreateProcess */
- write_stderr("WARNING: Unable to create restricted tokens on this platform\n");
- if (Advapi32Handle != NULL)
- FreeLibrary(Advapi32Handle);
- return CreateProcess(NULL, cmd, NULL, NULL, FALSE, 0, NULL, NULL, &si, processInfo);
- }
-
- /* Open the current token to use as a base for the restricted one */
- if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
- {
- write_stderr("Failed to open process token: %lu\n", GetLastError());
- return 0;
- }
-
- /* Allocate list of SIDs to remove */
- ZeroMemory(&dropSids, sizeof(dropSids));
- if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0,0,0,0,0,
- 0, &dropSids[0].Sid) ||
- !AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0,0,0,0,0,
- 0, &dropSids[1].Sid))
- {
- write_stderr("Failed to allocate SIDs: %lu\n", GetLastError());
- return 0;
- }
-
- b = _CreateRestrictedToken(origToken,
- DISABLE_MAX_PRIVILEGE,
- sizeof(dropSids)/sizeof(dropSids[0]),
- dropSids,
- 0, NULL,
- 0, NULL,
- &restrictedToken);
-
- FreeSid(dropSids[1].Sid);
- FreeSid(dropSids[0].Sid);
- CloseHandle(origToken);
- FreeLibrary(Advapi32Handle);
-
- if (!b)
- {
- write_stderr("Failed to create restricted token: %lu\n", GetLastError());
- return 0;
- }
-
- r = CreateProcessAsUser(restrictedToken, NULL, cmd, NULL, NULL, TRUE, CREATE_SUSPENDED, NULL, NULL, &si, processInfo);
-
- Kernel32Handle = LoadLibrary("KERNEL32.DLL");
- if (Kernel32Handle != NULL)
- {
- _IsProcessInJob = (__IsProcessInJob) GetProcAddress(Kernel32Handle, "IsProcessInJob");
- _CreateJobObject = (__CreateJobObject) GetProcAddress(Kernel32Handle, "CreateJobObjectA");
- _SetInformationJobObject = (__SetInformationJobObject) GetProcAddress(Kernel32Handle, "SetInformationJobObject");
- _AssignProcessToJobObject = (__AssignProcessToJobObject) GetProcAddress(Kernel32Handle, "AssignProcessToJobObject");
- _QueryInformationJobObject = (__QueryInformationJobObject) GetProcAddress(Kernel32Handle, "QueryInformationJobObject");
- }
-
- /* Verify that we found all functions */
- if (_IsProcessInJob == NULL || _CreateJobObject == NULL || _SetInformationJobObject == NULL || _AssignProcessToJobObject == NULL || _QueryInformationJobObject == NULL)
- {
- /* IsProcessInJob() is not available on < WinXP, so there is no need to log the error every time in that case */
+ int r;
+ BOOL b;
+ STARTUPINFO si;
+ HANDLE origToken;
+ HANDLE restrictedToken;
+ SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
+ SID_AND_ATTRIBUTES dropSids[2];
+
+ /* Functions loaded dynamically */
+ __CreateRestrictedToken _CreateRestrictedToken = NULL;
+ __IsProcessInJob _IsProcessInJob = NULL;
+ __CreateJobObject _CreateJobObject = NULL;
+ __SetInformationJobObject _SetInformationJobObject = NULL;
+ __AssignProcessToJobObject _AssignProcessToJobObject = NULL;
+ __QueryInformationJobObject _QueryInformationJobObject = NULL;
+ HANDLE Kernel32Handle;
+ HANDLE Advapi32Handle;
+
+ ZeroMemory(&si, sizeof(si));
+ si.cb = sizeof(si);
+
+ Advapi32Handle = LoadLibrary("ADVAPI32.DLL");
+ if (Advapi32Handle != NULL)
+ {
+ _CreateRestrictedToken = (__CreateRestrictedToken) GetProcAddress(Advapi32Handle, "CreateRestrictedToken");
+ }
+
+ if (_CreateRestrictedToken == NULL)
+ {
+ /*
+ * NT4 doesn't have CreateRestrictedToken, so just call ordinary
+ * CreateProcess
+ */
+ write_stderr("WARNING: Unable to create restricted tokens on this platform\n");
+ if (Advapi32Handle != NULL)
+ FreeLibrary(Advapi32Handle);
+ return CreateProcess(NULL, cmd, NULL, NULL, FALSE, 0, NULL, NULL, &si, processInfo);
+ }
+
+ /* Open the current token to use as a base for the restricted one */
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken))
+ {
+ write_stderr("Failed to open process token: %lu\n", GetLastError());
+ return 0;
+ }
+
+ /* Allocate list of SIDs to remove */
+ ZeroMemory(&dropSids, sizeof(dropSids));
+ if (!AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
+ 0, &dropSids[0].Sid) ||
+ !AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
+ 0, &dropSids[1].Sid))
+ {
+ write_stderr("Failed to allocate SIDs: %lu\n", GetLastError());
+ return 0;
+ }
+
+ b = _CreateRestrictedToken(origToken,
+ DISABLE_MAX_PRIVILEGE,
+ sizeof(dropSids) / sizeof(dropSids[0]),
+ dropSids,
+ 0, NULL,
+ 0, NULL,
+ &restrictedToken);
+
+ FreeSid(dropSids[1].Sid);
+ FreeSid(dropSids[0].Sid);
+ CloseHandle(origToken);
+ FreeLibrary(Advapi32Handle);
+
+ if (!b)
+ {
+ write_stderr("Failed to create restricted token: %lu\n", GetLastError());
+ return 0;
+ }
+
+ r = CreateProcessAsUser(restrictedToken, NULL, cmd, NULL, NULL, TRUE, CREATE_SUSPENDED, NULL, NULL, &si, processInfo);
+
+ Kernel32Handle = LoadLibrary("KERNEL32.DLL");
+ if (Kernel32Handle != NULL)
+ {
+ _IsProcessInJob = (__IsProcessInJob) GetProcAddress(Kernel32Handle, "IsProcessInJob");
+ _CreateJobObject = (__CreateJobObject) GetProcAddress(Kernel32Handle, "CreateJobObjectA");
+ _SetInformationJobObject = (__SetInformationJobObject) GetProcAddress(Kernel32Handle, "SetInformationJobObject");
+ _AssignProcessToJobObject = (__AssignProcessToJobObject) GetProcAddress(Kernel32Handle, "AssignProcessToJobObject");
+ _QueryInformationJobObject = (__QueryInformationJobObject) GetProcAddress(Kernel32Handle, "QueryInformationJobObject");
+ }
+
+ /* Verify that we found all functions */
+ if (_IsProcessInJob == NULL || _CreateJobObject == NULL || _SetInformationJobObject == NULL || _AssignProcessToJobObject == NULL || _QueryInformationJobObject == NULL)
+ {
+ /*
+ * IsProcessInJob() is not available on < WinXP, so there is no need
+ * to log the error every time in that case
+ */
OSVERSIONINFO osv;
osv.dwOSVersionInfoSize = sizeof(osv);
- if (!GetVersionEx(&osv) || /* could not get version */
- (osv.dwMajorVersion == 5 && osv.dwMinorVersion > 0) || /* 5.1=xp, 5.2=2003, etc */
- osv.dwMajorVersion > 5) /* anything newer should have the API */
- /* Log error if we can't get version, or if we're on WinXP/2003 or newer */
+ if (!GetVersionEx(&osv) || /* could not get version */
+ (osv.dwMajorVersion == 5 && osv.dwMinorVersion > 0) || /* 5.1=xp, 5.2=2003, etc */
+ osv.dwMajorVersion > 5) /* anything newer should have the API */
+
+ /*
+ * Log error if we can't get version, or if we're on WinXP/2003 or
+ * newer
+ */
write_stderr("WARNING: Unable to locate all job object functions in system API!\n");
- }
- else
- {
- BOOL inJob;
- if (_IsProcessInJob(processInfo->hProcess, NULL, &inJob))
- {
- if (!inJob)
- {
- /* Job objects are working, and the new process isn't in one, so we can create one safely.
- If any problems show up when setting it, we're going to ignore them. */
- HANDLE job;
- char jobname[128];
-
- sprintf(jobname,"PostgreSQL_%lu", processInfo->dwProcessId);
-
- job = _CreateJobObject(NULL, jobname);
- if (job)
- {
- JOBOBJECT_BASIC_LIMIT_INFORMATION basicLimit;
- JOBOBJECT_BASIC_UI_RESTRICTIONS uiRestrictions;
- JOBOBJECT_SECURITY_LIMIT_INFORMATION securityLimit;
-
- ZeroMemory(&basicLimit, sizeof(basicLimit));
- ZeroMemory(&uiRestrictions, sizeof(uiRestrictions));
- ZeroMemory(&securityLimit, sizeof(securityLimit));
-
- basicLimit.LimitFlags = JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION | JOB_OBJECT_LIMIT_PRIORITY_CLASS;
- basicLimit.PriorityClass = NORMAL_PRIORITY_CLASS;
- _SetInformationJobObject(job, JobObjectBasicLimitInformation, &basicLimit, sizeof(basicLimit));
-
- uiRestrictions.UIRestrictionsClass = JOB_OBJECT_UILIMIT_DESKTOP | JOB_OBJECT_UILIMIT_DISPLAYSETTINGS |
- JOB_OBJECT_UILIMIT_EXITWINDOWS | JOB_OBJECT_UILIMIT_HANDLES | JOB_OBJECT_UILIMIT_READCLIPBOARD |
- JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS | JOB_OBJECT_UILIMIT_WRITECLIPBOARD;
- _SetInformationJobObject(job, JobObjectBasicUIRestrictions, &uiRestrictions, sizeof(uiRestrictions));
-
- securityLimit.SecurityLimitFlags = JOB_OBJECT_SECURITY_NO_ADMIN | JOB_OBJECT_SECURITY_ONLY_TOKEN;
- securityLimit.JobToken = restrictedToken;
- _SetInformationJobObject(job, JobObjectSecurityLimitInformation, &securityLimit, sizeof(securityLimit));
-
- _AssignProcessToJobObject(job, processInfo->hProcess);
- }
- }
- }
- }
-
- CloseHandle(restrictedToken);
-
- ResumeThread(processInfo->hThread);
-
- FreeLibrary(Kernel32Handle);
-
- /*
+ }
+ else
+ {
+ BOOL inJob;
+
+ if (_IsProcessInJob(processInfo->hProcess, NULL, &inJob))
+ {
+ if (!inJob)
+ {
+ /*
+ * Job objects are working, and the new process isn't in one,
+ * so we can create one safely. If any problems show up when
+ * setting it, we're going to ignore them.
+ */
+ HANDLE job;
+ char jobname[128];
+
+ sprintf(jobname, "PostgreSQL_%lu", processInfo->dwProcessId);
+
+ job = _CreateJobObject(NULL, jobname);
+ if (job)
+ {
+ JOBOBJECT_BASIC_LIMIT_INFORMATION basicLimit;
+ JOBOBJECT_BASIC_UI_RESTRICTIONS uiRestrictions;
+ JOBOBJECT_SECURITY_LIMIT_INFORMATION securityLimit;
+
+ ZeroMemory(&basicLimit, sizeof(basicLimit));
+ ZeroMemory(&uiRestrictions, sizeof(uiRestrictions));
+ ZeroMemory(&securityLimit, sizeof(securityLimit));
+
+ basicLimit.LimitFlags = JOB_OBJECT_LIMIT_DIE_ON_UNHANDLED_EXCEPTION | JOB_OBJECT_LIMIT_PRIORITY_CLASS;
+ basicLimit.PriorityClass = NORMAL_PRIORITY_CLASS;
+ _SetInformationJobObject(job, JobObjectBasicLimitInformation, &basicLimit, sizeof(basicLimit));
+
+ uiRestrictions.UIRestrictionsClass = JOB_OBJECT_UILIMIT_DESKTOP | JOB_OBJECT_UILIMIT_DISPLAYSETTINGS |
+ JOB_OBJECT_UILIMIT_EXITWINDOWS | JOB_OBJECT_UILIMIT_HANDLES | JOB_OBJECT_UILIMIT_READCLIPBOARD |
+ JOB_OBJECT_UILIMIT_SYSTEMPARAMETERS | JOB_OBJECT_UILIMIT_WRITECLIPBOARD;
+ _SetInformationJobObject(job, JobObjectBasicUIRestrictions, &uiRestrictions, sizeof(uiRestrictions));
+
+ securityLimit.SecurityLimitFlags = JOB_OBJECT_SECURITY_NO_ADMIN | JOB_OBJECT_SECURITY_ONLY_TOKEN;
+ securityLimit.JobToken = restrictedToken;
+ _SetInformationJobObject(job, JobObjectSecurityLimitInformation, &securityLimit, sizeof(securityLimit));
+
+ _AssignProcessToJobObject(job, processInfo->hProcess);
+ }
+ }
+ }
+ }
+
+ CloseHandle(restrictedToken);
+
+ ResumeThread(processInfo->hThread);
+
+ FreeLibrary(Kernel32Handle);
+
+ /*
* We intentionally don't close the job object handle, because we want the
* object to live on until pg_ctl shuts down.
*/
- return r;
+ return r;
}
-
#endif
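
For orientation, a hypothetical caller of the launcher above might look like the sketch below. This is an illustration only; the helper name start_restricted is assumed and this is not how pg_ctl itself drives CreateRestrictedProcess.

#ifdef WIN32
/*
 * Illustration only (assumed helper, not pg_ctl code): run a command line
 * through CreateRestrictedProcess() defined above and wait for it to exit.
 */
static int
start_restricted(char *cmdline)
{
	PROCESS_INFORMATION pi;
	DWORD		exitcode = 1;

	if (!CreateRestrictedProcess(cmdline, &pi))
		return 1;				/* could not start the child */

	WaitForSingleObject(pi.hProcess, INFINITE);
	GetExitCodeProcess(pi.hProcess, &exitcode);
	CloseHandle(pi.hProcess);
	CloseHandle(pi.hThread);
	return (int) exitcode;
}
#endif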
static void
do_wait = false;
}
- if (pg_data)
- {
- snprintf(def_postopts_file, MAXPGPATH, "%s/postmaster.opts.default", pg_data);
- snprintf(postopts_file, MAXPGPATH, "%s/postmaster.opts", pg_data);
- snprintf(pid_file, MAXPGPATH, "%s/postmaster.pid", pg_data);
- snprintf(conf_file, MAXPGPATH, "%s/postgresql.conf", pg_data);
- }
+ if (pg_data)
+ {
+ snprintf(def_postopts_file, MAXPGPATH, "%s/postmaster.opts.default", pg_data);
+ snprintf(postopts_file, MAXPGPATH, "%s/postmaster.opts", pg_data);
+ snprintf(pid_file, MAXPGPATH, "%s/postmaster.pid", pg_data);
+ snprintf(conf_file, MAXPGPATH, "%s/postgresql.conf", pg_data);
+ }
switch (ctl_command)
{
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.31 2006/09/08 18:05:35 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.32 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* standard_conforming_strings settings.
*
* This is essentially equivalent to libpq's PQescapeStringInternal,
- * except for the output buffer structure. We need it in situations
+ * except for the output buffer structure. We need it in situations
* where we do not have a PGconn available. Where we do,
* appendStringLiteralConn is a better choice.
*/
while (*source != '\0')
{
- char c = *source;
- int len;
- int i;
+ char c = *source;
+ int len;
+ int i;
/* Fast path for plain ASCII */
if (!IS_HIGHBIT_SET(c))
/*
* If we hit premature end of string (ie, incomplete multibyte
- * character), try to pad out to the correct length with spaces.
- * We may not be able to pad completely, but we will always be able
- * to insert at least one pad space (since we'd not have quoted a
+ * character), try to pad out to the correct length with spaces. We
+ * may not be able to pad completely, but we will always be able to
+ * insert at least one pad space (since we'd not have quoted a
* multibyte character). This should be enough to make a string that
* the server will error out on.
*/
if (i < len)
{
- char *stop = buf->data + buf->maxlen - 2;
+ char *stop = buf->data + buf->maxlen - 2;
for (; i < len; i++)
{
void
appendStringLiteralConn(PQExpBuffer buf, const char *str, PGconn *conn)
{
- size_t length = strlen(str);
+ size_t length = strlen(str);
/*
* XXX This is a kluge to silence escape_string_warning in our utility
if (strchr(str, '\\') != NULL && PQserverVersion(conn) >= 80100)
{
/* ensure we are not adjacent to an identifier */
- if (buf->len > 0 && buf->data[buf->len-1] != ' ')
+ if (buf->len > 0 && buf->data[buf->len - 1] != ' ')
appendPQExpBufferChar(buf, ' ');
appendPQExpBufferChar(buf, ESCAPE_STRING_SYNTAX);
appendStringLiteral(buf, str, PQclientEncoding(conn), false);
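
As a quick illustration of the quoting helpers above, the sketch below (an assumed wrapper, not part of pg_dump) quotes an identifier with fmtId() and a literal with appendStringLiteralConn(), which takes its escaping rules from the live connection.

/*
 * Illustration only (assumed helper): build a COMMENT command, quoting the
 * identifier with fmtId() and the literal via the connection-aware helper.
 */
static void
append_comment_command(PQExpBuffer buf, const char *dbname,
					   const char *comment, PGconn *conn)
{
	appendPQExpBuffer(buf, "COMMENT ON DATABASE %s IS ", fmtId(dbname));
	appendStringLiteralConn(buf, comment, conn);
	appendPQExpBuffer(buf, ";\n");
}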
if (strcmp(type, "TABLE") == 0 || strcmp(type, "SEQUENCE") == 0)
{
CONVERT_PRIV('r', "SELECT");
-
+
if (strcmp(type, "SEQUENCE") == 0)
/* sequence only */
CONVERT_PRIV('U', "USAGE");
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.17 2006/05/28 21:13:54 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.18 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern const char *fmtId(const char *identifier);
extern void appendStringLiteral(PQExpBuffer buf, const char *str,
- int encoding, bool std_strings);
+ int encoding, bool std_strings);
extern void appendStringLiteralConn(PQExpBuffer buf, const char *str,
- PGconn *conn);
+ PGconn *conn);
extern void appendStringLiteralDQ(PQExpBuffer buf, const char *str,
const char *dqprefix);
extern int parse_version(const char *versionString);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.42 2006/08/01 18:21:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.43 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int suppressDumpWarnings; /* Suppress output of WARNING entries
* to stderr */
- bool single_txn;
+ bool single_txn;
} RestoreOptions;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.135 2006/08/01 18:21:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.136 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
_printTocEntry(AH, te, ropt, false, false);
defnDumped = true;
- /* If we could not create a table, ignore the respective TABLE DATA if
- * -X no-data-for-failed-tables is given */
- if (ropt->noDataForFailedTables && AH->lastErrorTE == te && strcmp (te->desc, "TABLE") == 0) {
- TocEntry *tes, *last;
-
- ahlog (AH, 1, "table %s could not be created, will not restore its data\n", te->tag);
-
- for (last = te, tes = te->next; tes != AH->toc; last = tes, tes = tes->next) {
- if (strcmp (tes->desc, "TABLE DATA") == 0 && strcmp (tes->tag, te->tag) == 0 &&
- strcmp (tes->namespace ? tes->namespace : "", te->namespace ? te->namespace : "") == 0) {
- /* remove this node */
- last->next = tes->next;
- break;
+ /*
+ * If we could not create a table, ignore the respective TABLE
+ * DATA if -X no-data-for-failed-tables is given
+ */
+ if (ropt->noDataForFailedTables && AH->lastErrorTE == te && strcmp(te->desc, "TABLE") == 0)
+ {
+ TocEntry *tes,
+ *last;
+
+ ahlog(AH, 1, "table %s could not be created, will not restore its data\n", te->tag);
+
+ for (last = te, tes = te->next; tes != AH->toc; last = tes, tes = tes->next)
+ {
+ if (strcmp(tes->desc, "TABLE DATA") == 0 && strcmp(tes->tag, te->tag) == 0 &&
+ strcmp(tes->namespace ? tes->namespace : "", te->namespace ? te->namespace : "") == 0)
+ {
+ /* remove this node */
+ last->next = tes->next;
+ break;
}
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.72 2006/07/18 17:42:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.73 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct _tocEntry;
struct _restoreList;
-typedef void (*ClosePtr) (struct _archiveHandle *AH);
-typedef void (*ArchiveEntryPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-
-typedef void (*StartDataPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-typedef size_t (*WriteDataPtr) (struct _archiveHandle *AH, const void *data, size_t dLen);
-typedef void (*EndDataPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-
-typedef void (*StartBlobsPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-typedef void (*StartBlobPtr) (struct _archiveHandle *AH, struct _tocEntry *te, Oid oid);
-typedef void (*EndBlobPtr) (struct _archiveHandle *AH, struct _tocEntry *te, Oid oid);
-typedef void (*EndBlobsPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-
-typedef int (*WriteBytePtr) (struct _archiveHandle *AH, const int i);
-typedef int (*ReadBytePtr) (struct _archiveHandle *AH);
-typedef size_t (*WriteBufPtr) (struct _archiveHandle *AH, const void *c, size_t len);
-typedef size_t (*ReadBufPtr) (struct _archiveHandle *AH, void *buf, size_t len);
-typedef void (*SaveArchivePtr) (struct _archiveHandle *AH);
-typedef void (*WriteExtraTocPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-typedef void (*ReadExtraTocPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-typedef void (*PrintExtraTocPtr) (struct _archiveHandle *AH, struct _tocEntry *te);
-typedef void (*PrintTocDataPtr) (struct _archiveHandle *AH, struct _tocEntry *te, RestoreOptions *ropt);
-
-typedef size_t (*CustomOutPtr) (struct _archiveHandle *AH, const void *buf, size_t len);
+typedef void (*ClosePtr) (struct _archiveHandle * AH);
+typedef void (*ArchiveEntryPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+
+typedef void (*StartDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+typedef size_t (*WriteDataPtr) (struct _archiveHandle * AH, const void *data, size_t dLen);
+typedef void (*EndDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+
+typedef void (*StartBlobsPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+typedef void (*StartBlobPtr) (struct _archiveHandle * AH, struct _tocEntry * te, Oid oid);
+typedef void (*EndBlobPtr) (struct _archiveHandle * AH, struct _tocEntry * te, Oid oid);
+typedef void (*EndBlobsPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+
+typedef int (*WriteBytePtr) (struct _archiveHandle * AH, const int i);
+typedef int (*ReadBytePtr) (struct _archiveHandle * AH);
+typedef size_t (*WriteBufPtr) (struct _archiveHandle * AH, const void *c, size_t len);
+typedef size_t (*ReadBufPtr) (struct _archiveHandle * AH, void *buf, size_t len);
+typedef void (*SaveArchivePtr) (struct _archiveHandle * AH);
+typedef void (*WriteExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+typedef void (*ReadExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+typedef void (*PrintExtraTocPtr) (struct _archiveHandle * AH, struct _tocEntry * te);
+typedef void (*PrintTocDataPtr) (struct _archiveHandle * AH, struct _tocEntry * te, RestoreOptions *ropt);
+
+typedef size_t (*CustomOutPtr) (struct _archiveHandle * AH, const void *buf, size_t len);
typedef enum _archiveMode
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.35 2006/07/14 14:52:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.36 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
"could not read from input file: end of file\n");
else
die_horribly(AH, modulename,
- "could not read from input file: %s\n", strerror(errno));
+ "could not read from input file: %s\n", strerror(errno));
}
ctx->filePos += blkLen;
"could not read from input file: end of file\n");
else
die_horribly(AH, modulename,
- "could not read from input file: %s\n", strerror(errno));
+ "could not read from input file: %s\n", strerror(errno));
}
ctx->filePos += blkLen;
* Implements the basic DB functions used by the archiver.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.74 2006/09/27 15:41:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.75 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
isEnd = (strcmp(AH->pgCopyBuf->data, "\\.\n") == 0);
/*
- * Note that we drop the data on the floor if libpq has failed to
- * enter COPY mode; this allows us to behave reasonably when trying
- * to continue after an error in a COPY command.
+ * Note that we drop the data on the floor if libpq has failed to enter
+ * COPY mode; this allows us to behave reasonably when trying to continue
+ * after an error in a COPY command.
*/
if (AH->pgCopyIn &&
PQputCopyData(AH->connection, AH->pgCopyBuf->data,
{
/*
* If libpq is in CopyIn mode *or* if the archive structure shows we
- * are sending COPY data, treat the data as COPY data. The pgCopyIn
+ * are sending COPY data, treat the data as COPY data. The pgCopyIn
* check is only needed for backwards compatibility with ancient
* archive files that might just issue a COPY command without marking
* it properly. Note that in an archive entry that has a copyStmt,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.54 2006/06/27 02:56:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.55 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef WIN32
tm->tmpFH = tmpfile();
#else
+
/*
- * On WIN32, tmpfile() generates a filename in the root directory,
- * which requires administrative permissions on certain systems.
- * Loop until we find a unique file name we can create.
+ * On WIN32, tmpfile() generates a filename in the root directory,
+ * which requires administrative permissions on certain systems. Loop
+ * until we find a unique file name we can create.
*/
while (1)
{
- char *name;
- int fd;
-
+ char *name;
+ int fd;
+
name = _tempnam(NULL, "pg_temp_");
if (name == NULL)
break;
O_TEMPORARY, S_IRUSR | S_IWUSR);
free(name);
- if (fd != -1) /* created a file */
+ if (fd != -1) /* created a file */
{
tm->tmpFH = fdopen(fd, "w+b");
break;
res = fwrite(&buf[0], 1, cnt, th->tarFH);
if (res != cnt)
die_horribly(AH, modulename,
- "could not write to output file: %s\n", strerror(errno));
+ "could not write to output file: %s\n", strerror(errno));
len += res;
}
* by PostgreSQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.449 2006/09/27 15:41:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.450 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct objnameArg
{
struct objnameArg *next;
- char *name; /* name of the relation */
- bool is_include; /* include/exclude? */
+ char *name; /* name of the relation */
+ bool is_include; /* include/exclude? */
} objnameArg;
-objnameArg *schemaList = NULL; /* List of schemas to include/exclude */
-objnameArg *tableList = NULL; /* List of tables to include/exclude */
+objnameArg *schemaList = NULL; /* List of schemas to include/exclude */
+objnameArg *tableList = NULL; /* List of tables to include/exclude */
-char *matchingSchemas = NULL; /* Final list of schemas to dump by oid */
-char *matchingTables = NULL; /* Final list of tables to dump by oid */
+char *matchingSchemas = NULL; /* Final list of schemas to dump by
+ * oid */
+char *matchingTables = NULL; /* Final list of tables to dump by oid */
char g_opaque_type[10]; /* name for the opaque type */
{
PQExpBuffer query = createPQExpBuffer();
PGresult *res;
- objnameArg *this_obj_name, *schemaList_tail = NULL, *tableList_tail = NULL;
+ objnameArg *this_obj_name,
+ *schemaList_tail = NULL,
+ *tableList_tail = NULL;
int c;
const char *filename = NULL;
const char *format = "p";
ignore_version = true;
break;
- case 'n': /* Include schemas */
- case 'N': /* Exclude schemas */
- case 't': /* Include tables */
- case 'T': /* Exclude tables */
+ case 'n': /* Include schemas */
+ case 'N': /* Exclude schemas */
+ case 't': /* Include tables */
+ case 'T': /* Exclude tables */
if (strlen(optarg) < 1)
{
{
/* Create a struct for this name */
objnameArg *new_obj_name = (objnameArg *)
- malloc(sizeof(objnameArg));
+ malloc(sizeof(objnameArg));
new_obj_name->next = NULL;
new_obj_name->name = strdup(optarg);
}
/*
- * Get the active encoding and the standard_conforming_strings setting,
- * so we know how to escape strings.
+ * Get the active encoding and the standard_conforming_strings setting, so
+ * we know how to escape strings.
*/
g_fout->encoding = PQclientEncoding(g_conn);
/* Special case for when -N is the first argument */
if (this_obj_name == schemaList && !this_obj_name->is_include)
appendPQExpBuffer(query,
- "SELECT oid FROM pg_catalog.pg_namespace "
- "WHERE nspname NOT LIKE 'pg_%%' AND "
- " nspname != 'information_schema' EXCEPT\n");
-
+ "SELECT oid FROM pg_catalog.pg_namespace "
+ "WHERE nspname NOT LIKE 'pg_%%' AND "
+ " nspname != 'information_schema' EXCEPT\n");
+
appendPQExpBuffer(query, "SELECT oid FROM pg_catalog.pg_namespace WHERE");
}
-
+
appendPQExpBuffer(query, "%s nspname %c ", switch_include_exclude ? "" : " OR",
- /* any meta-characters? */
- strpbrk(this_obj_name->name,"([{\\.?+") == NULL ? '=' : '~');
+ /* any meta-characters? */
+ strpbrk(this_obj_name->name, "([{\\.?+") == NULL ? '=' : '~');
appendStringLiteralAH(query, this_obj_name->name, g_fout);
-
+
if (this_obj_name->next && this_obj_name->next->is_include == this_obj_name->is_include)
switch_include_exclude = false;
else
{
switch_include_exclude = true;
-
+
/* Add the joiner if needed */
if (this_obj_name->next)
appendPQExpBuffer(query, "\n%s\n",
- this_obj_name->next->is_include ? "UNION" : "EXCEPT");
+ this_obj_name->next->is_include ? "UNION" : "EXCEPT");
}
}
/* Construct OID list of matching schemas */
if (schemaList)
{
- int len;
-
+ int len;
+
res = PQexec(g_conn, query->data);
check_sql_result(res, g_conn, query->data, PGRES_TUPLES_OK);
if (PQntuples(res) == 0)
len += strlen(PQgetvalue(res, i, 0)) + 1;
/*
- * Need to use comma separators so it can be used by IN. zero
- * is a dummy placeholder. Format is " oid oid oid ".
+ * Need to use comma separators so it can be used by IN. zero is a
+ * dummy placeholder. Format is " oid oid oid ".
*/
matchingSchemas = malloc(len + 1);
strcpy(matchingSchemas, " ");
/* Special case for when -T is the first argument */
if (this_obj_name == tableList && !this_obj_name->is_include && !strlen(query->data))
appendPQExpBuffer(query,
- "SELECT pg_class.oid FROM pg_catalog.pg_class, pg_catalog.pg_namespace "
- "WHERE relkind='r' AND "
- " relnamespace = pg_namespace.oid AND "
- " nspname NOT LIKE 'pg_%%' AND "
- " nspname != 'information_schema' EXCEPT\n");
-
+ "SELECT pg_class.oid FROM pg_catalog.pg_class, pg_catalog.pg_namespace "
+ "WHERE relkind='r' AND "
+ " relnamespace = pg_namespace.oid AND "
+ " nspname NOT LIKE 'pg_%%' AND "
+ " nspname != 'information_schema' EXCEPT\n");
+
appendPQExpBuffer(query, "SELECT oid FROM pg_catalog.pg_class WHERE relkind='r' AND (");
}
-
+
appendPQExpBuffer(query, "%srelname %c ", switch_include_exclude ? "" : " OR ",
- /* any meta-characters? */
- strpbrk(this_obj_name->name,"([{\\.?+") == NULL ? '=' : '~');
+ /* any meta-characters? */
+ strpbrk(this_obj_name->name, "([{\\.?+") == NULL ? '=' : '~');
appendStringLiteralAH(query, this_obj_name->name, g_fout);
-
+
if (this_obj_name->next && this_obj_name->next->is_include == this_obj_name->is_include)
switch_include_exclude = false;
else
{
switch_include_exclude = true;
appendPQExpBuffer(query, ")");
-
+
/* Add the joiner if needed */
if (this_obj_name->next)
appendPQExpBuffer(query, "\n%s\n", this_obj_name->next->is_include ?
/* Construct OID list of matching tables */
if (tableList)
{
- int len;
-
+ int len;
+
/* Restrict by schema? */
if (matchingSchemas != NULL)
{
- char *matchingSchemas_commas = strdup(matchingSchemas), *p;
+ char *matchingSchemas_commas = strdup(matchingSchemas),
+ *p;
/* Construct "IN" SQL string by adding commas, " oid, oid, oid " */
for (p = matchingSchemas_commas; *p; p++)
{
/* No commas for first/last characters */
- if (*p == ' ' && p != matchingSchemas_commas && *(p+1))
+ if (*p == ' ' && p != matchingSchemas_commas && *(p + 1))
*p = ',';
}
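
The space-to-comma rewrite above is easier to see in isolation; the stand-alone sketch below uses made-up OIDs (it is not pg_dump code) to show how the " oid oid oid " form becomes usable inside an IN (...) list.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	const char *matching = " 16384 16385 16386 ";	/* dummy OID list */
	char	   *commas = strdup(matching);
	char	   *p;

	/* interior spaces become commas; the first and last space are kept */
	for (p = commas; *p; p++)
	{
		if (*p == ' ' && p != commas && *(p + 1))
			*p = ',';
	}
	printf("... WHERE relnamespace IN (%s)\n", commas);
	free(commas);
	return 0;
}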
{
/*
* If specific tables are being dumped, do not dump any complete
- * namespaces. If specific namespaces are being dumped, dump just
- * those namespaces. Otherwise, dump all non-system namespaces.
+ * namespaces. If specific namespaces are being dumped, dump just those
+ * namespaces. Otherwise, dump all non-system namespaces.
*/
nsinfo->dobj.dump = false;
if (matchingTables != NULL)
- /* false */;
+ /* false */ ;
else if (matchingSchemas != NULL)
{
- char *search_oid = malloc(20);
+ char *search_oid = malloc(20);
sprintf(search_oid, " %d ", nsinfo->dobj.catId.oid);
if (strstr(matchingSchemas, search_oid) != NULL)
}
else
{
- char *search_oid = malloc(20);
+ char *search_oid = malloc(20);
sprintf(search_oid, " %d ", tbinfo->dobj.catId.oid);
if (strstr(matchingTables, search_oid) != NULL)
selectDumpableObject(DumpableObject *dobj)
{
/*
- * Default policy is to dump if parent namespace is dumpable,
- * or always for non-namespace-associated items.
+ * Default policy is to dump if parent namespace is dumpable, or always
+ * for non-namespace-associated items.
*/
if (dobj->namespace)
dobj->dump = dobj->namespace->dobj.dump;
"(%s datdba) as dba, "
"pg_encoding_to_char(encoding) as encoding, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) as tablespace, "
- "shobj_description(oid, 'pg_database') as description "
+ "shobj_description(oid, 'pg_database') as description "
"FROM pg_database "
"WHERE datname = ",
/* Dump DB comment if any */
if (g_fout->remoteVersion >= 80200)
{
- /* 8.2 keeps comments on shared objects in a shared table, so
- * we cannot use the dumpComment used for other database objects.
+ /*
+ * 8.2 keeps comments on shared objects in a shared table, so we
+ * cannot use the dumpComment used for other database objects.
*/
- char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
- if (comment && strlen(comment)) {
+ char *comment = PQgetvalue(res, 0, PQfnumber(res, "description"));
+
+ if (comment && strlen(comment))
+ {
resetPQExpBuffer(dbQry);
appendPQExpBuffer(dbQry, "COMMENT ON DATABASE %s IS ", fmtId(datname));
appendStringLiteralAH(dbQry, comment, AH);
appendPQExpBuffer(dbQry, ";\n");
ArchiveEntry(AH, dbCatId, createDumpId(), datname, NULL, NULL,
- dba, false, "COMMENT", dbQry->data, "", NULL,
- &dbDumpId, 1, NULL, NULL);
+ dba, false, "COMMENT", dbQry->data, "", NULL,
+ &dbDumpId, 1, NULL, NULL);
}
- } else {
+ }
+ else
+ {
resetPQExpBuffer(dbQry);
appendPQExpBuffer(dbQry, "DATABASE %s", fmtId(datname));
dumpComment(AH, dbQry->data, NULL, "",
- dbCatId, 0, dbDumpId);
+ dbCatId, 0, dbDumpId);
}
PQclear(res);
/*
* If it's a base type, make a DumpableObject representing a shell
- * definition of the type. We will need to dump that ahead of the
- * I/O functions for the type.
+ * definition of the type. We will need to dump that ahead of the I/O
+ * functions for the type.
*
* Note: the shell type doesn't have a catId. You might think it
- * should copy the base type's catId, but then it might capture
- * the pg_depend entries for the type, which we don't want.
+ * should copy the base type's catId, but then it might capture the
+ * pg_depend entries for the type, which we don't want.
*/
if (tinfo[i].dobj.dump && tinfo[i].typtype == 'b')
{
tinfo[i].shellType = stinfo;
/*
- * Initially mark the shell type as not to be dumped. We'll
- * only dump it if the I/O functions need to be dumped; this
- * is taken care of while sorting dependencies.
+ * Initially mark the shell type as not to be dumped. We'll only
+ * dump it if the I/O functions need to be dumped; this is taken
+ * care of while sorting dependencies.
*/
stinfo->dobj.dump = false;
{
appendPQExpBuffer(query, "SELECT tableoid, oid, aggname, "
"0::oid as aggnamespace, "
- "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END as pronargs, "
+ "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END as pronargs, "
"aggbasetype as proargtypes, "
"(%s aggowner) as rolname, "
"'{=X}' as aggacl "
"(SELECT oid FROM pg_class WHERE relname = 'pg_aggregate') AS tableoid, "
"oid, aggname, "
"0::oid as aggnamespace, "
- "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END as pronargs, "
+ "CASE WHEN aggbasetype = 0 THEN 0 ELSE 1 END as pronargs, "
"aggbasetype as proargtypes, "
"(%s aggowner) as rolname, "
"'{=X}' as aggacl "
parseOidArray(PQgetvalue(res, i, i_proargtypes),
agginfo[i].aggfn.argtypes,
agginfo[i].aggfn.nargs);
- else /* it's just aggbasetype */
+ else
+ /* it's just aggbasetype */
agginfo[i].aggfn.argtypes[0] = atooid(PQgetvalue(res, i, i_proargtypes));
}
PQclear(res);
/*
- * Force sequences that are "owned" by table columns to be dumped
- * whenever their owning table is being dumped.
+ * Force sequences that are "owned" by table columns to be dumped whenever
+ * their owning table is being dumped.
*/
for (i = 0; i < ntups; i++)
{
TableInfo *seqinfo = &tblinfo[i];
- int j;
+ int j;
if (!OidIsValid(seqinfo->owning_tab))
continue; /* not an owned sequence */
"c.tableoid as contableoid, "
"c.oid as conoid, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) as tablespace, "
- "array_to_string(t.reloptions, ', ') as options "
+ "array_to_string(t.reloptions, ', ') as options "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"c.tableoid as contableoid, "
"c.oid as conoid, "
"(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) as tablespace, "
- "null as options "
+ "null as options "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"c.tableoid as contableoid, "
"c.oid as conoid, "
"NULL as tablespace, "
- "null as options "
+ "null as options "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
"0::oid as contableoid, "
"t.oid as conoid, "
"NULL as tablespace, "
- "null as options "
+ "null as options "
"FROM pg_index i, pg_class t "
"WHERE t.oid = i.indexrelid "
"AND i.indrelid = '%u'::oid "
"0::oid as contableoid, "
"t.oid as conoid, "
"NULL as tablespace, "
- "null as options "
+ "null as options "
"FROM pg_index i, pg_class t "
"WHERE t.oid = i.indexrelid "
"AND i.indrelid = '%u'::oid "
else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
{
typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
- typdefault_is_literal = true; /* it needs quotes */
+ typdefault_is_literal = true; /* it needs quotes */
}
else
typdefault = NULL;
/*
* DROP must be fully qualified in case same name appears in pg_catalog.
* The reason we include CASCADE is that the circular dependency between
- * the type and its I/O functions makes it impossible to drop the type
- * any other way.
+ * the type and its I/O functions makes it impossible to drop the type any
+ * other way.
*/
appendPQExpBuffer(delq, "DROP TYPE %s.",
fmtId(tinfo->dobj.namespace->dobj.name));
else if (!PQgetisnull(res, 0, PQfnumber(res, "typdefault")))
{
typdefault = PQgetvalue(res, 0, PQfnumber(res, "typdefault"));
- typdefault_is_literal = true; /* it needs quotes */
+ typdefault_is_literal = true; /* it needs quotes */
}
else
typdefault = NULL;
/*
* Note the lack of a DROP command for the shell type; any required DROP
- * is driven off the base type entry, instead. This interacts with
+ * is driven off the base type entry, instead. This interacts with
* _printTocEntry()'s use of the presence of a DROP command to decide
- * whether an entry needs an ALTER OWNER command. We don't want to
- * alter the shell type's owner immediately on creation; that should
- * happen only after it's filled in, otherwise the backend complains.
+ * whether an entry needs an ALTER OWNER command. We don't want to alter
+ * the shell type's owner immediately on creation; that should happen only
+ * after it's filled in, otherwise the backend complains.
*/
appendPQExpBuffer(q, "CREATE TYPE %s;\n",
* contains quote or backslash; else use regular quoting.
*/
if (disable_dollar_quoting ||
- (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
+ (strchr(prosrc, '\'') == NULL && strchr(prosrc, '\\') == NULL))
appendStringLiteralAH(asPart, prosrc, fout);
else
appendStringLiteralDQ(asPart, prosrc, NULL);
* Skip this cast if all objects are from pg_
*/
if ((funcInfo == NULL ||
- strncmp(funcInfo->dobj.namespace->dobj.name, "pg_", 3) == 0) &&
+ strncmp(funcInfo->dobj.namespace->dobj.name, "pg_", 3) == 0) &&
strncmp(sourceInfo->dobj.namespace->dobj.name, "pg_", 3) == 0 &&
strncmp(targetInfo->dobj.namespace->dobj.name, "pg_", 3) == 0)
return;
}
/*
- * Default value --- suppress if inherited or to be
- * printed separately.
+ * Default value --- suppress if inherited or to be printed
+ * separately.
*/
if (tbinfo->attrdefs[j] != NULL &&
!tbinfo->inhAttrDef[j] &&
/*
* The logic we use for restoring sequences is as follows:
*
- * Add a CREATE SEQUENCE statement as part of a "schema" dump
- * (use last_val for start if called is false, else use min_val for
- * start_val). Also, if the sequence is owned by a column, add an
- * ALTER SEQUENCE SET OWNED command for it.
+ * Add a CREATE SEQUENCE statement as part of a "schema" dump (use
+ * last_val for start if called is false, else use min_val for start_val).
+ * Also, if the sequence is owned by a column, add an ALTER SEQUENCE SET
+ * OWNED command for it.
*
* Add a 'SETVAL(seq, last_val, iscalled)' as part of a "data" dump.
*/
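
To make the second step concrete, a simplified sketch of building such a setval() call is shown below; the helper name and the quoting shortcut are assumptions, and the real dump code quotes the sequence name more carefully.

/*
 * Illustration only (assumed helper, simplified quoting): emit the "data"
 * statement for a sequence as a pg_catalog.setval() call.
 */
static void
append_setval_command(PQExpBuffer query, const char *seqname,
					  const char *last_val, bool is_called)
{
	appendPQExpBuffer(query, "SELECT pg_catalog.setval('%s', %s, %s);\n",
					  seqname, last_val, is_called ? "true" : "false");
}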
* as a separate TOC entry immediately following the sequence's own
* entry. It's OK to do this rather than using full sorting logic,
* because the dependency that tells us it's owned will have forced
- * the table to be created first. We can't just include the ALTER
- * in the TOC entry because it will fail if we haven't reassigned
- * the sequence owner to match the table's owner.
+ * the table to be created first. We can't just include the ALTER in
+ * the TOC entry because it will fail if we haven't reassigned the
+ * sequence owner to match the table's owner.
*
* We need not schema-qualify the table reference because both
* sequence and table must be in the same schema.
appendPQExpBuffer(query, " OWNED BY %s",
fmtId(owning_tab->dobj.name));
appendPQExpBuffer(query, ".%s;\n",
- fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
+ fmtId(owning_tab->attnames[tbinfo->owning_col - 1]));
ArchiveEntry(fout, nilCatalogId, createDumpId(),
tbinfo->dobj.name,
tbinfo->dobj.namespace->dobj.name,
NULL,
tbinfo->rolname,
- false, "SEQUENCE OWNED BY", query->data, "", NULL,
+ false, "SEQUENCE OWNED BY", query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
}
{
if (*s == '\'')
appendPQExpBufferChar(query, '\'');
+
/*
- * bytea unconditionally doubles backslashes, so we suppress
- * the doubling for standard_conforming_strings.
+ * bytea unconditionally doubles backslashes, so we suppress the
+ * doubling for standard_conforming_strings.
*/
if (fout->std_strings && *s == '\\' && s[1] == '\\')
s++;
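
To see the backslash handling in isolation, here is a stand-alone sketch with a made-up input (not pg_dump code): it prints a value as a standard_conforming_strings literal, doubling embedded quotes and collapsing the doubled backslashes that bytea text output produces.

#include <stdio.h>

static void
print_std_literal(const char *s)
{
	putchar('\'');
	for (; *s; s++)
	{
		if (*s == '\'')
			putchar('\'');		/* double embedded single quotes */
		if (*s == '\\' && s[1] == '\\')
			s++;				/* keep only one of the doubled backslashes */
		putchar(*s);
	}
	printf("'\n");
}

int
main(void)
{
	print_std_literal("it's \\\\111");	/* input with a doubled backslash */
	return 0;
}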
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.82 2006/09/27 15:41:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.83 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void dumpDatabaseConfig(PGconn *conn, const char *dbname);
static void dumpUserConfig(PGconn *conn, const char *username);
static void makeAlterConfigCommand(PGconn *conn, const char *arrayitem,
- const char *type, const char *name);
+ const char *type, const char *name);
static void dumpDatabases(PGconn *conn);
static void dumpTimestamp(char *msg);
force_password, true);
/*
- * Get the active encoding and the standard_conforming_strings setting,
- * so we know how to escape strings.
+ * Get the active encoding and the standard_conforming_strings setting, so
+ * we know how to escape strings.
*/
encoding = PQclientEncoding(conn);
std_strings = PQparameterStatus(conn, "standard_conforming_strings");
"rolcreaterole, rolcreatedb, rolcatupdate, "
"rolcanlogin, rolconnlimit, rolpassword, "
"rolvaliduntil, "
- "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment "
+ "pg_catalog.shobj_description(oid, 'pg_authid') as rolcomment "
"FROM pg_authid "
"ORDER BY 1");
else if (server_version >= 80100)
appendPQExpBuffer(buf, ";\n");
- if (!PQgetisnull(res, i, i_rolcomment)) {
+ if (!PQgetisnull(res, i, i_rolcomment))
+ {
appendPQExpBuffer(buf, "COMMENT ON ROLE %s IS ", fmtId(rolename));
appendStringLiteralConn(buf, PQgetvalue(res, i, i_rolcomment), conn);
appendPQExpBuffer(buf, ";\n");
*/
if (server_version >= 80200)
res = executeQuery(conn, "SELECT spcname, "
- "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
+ "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
"spclocation, spcacl, "
- "pg_catalog.shobj_description(oid, 'pg_tablespace') "
+ "pg_catalog.shobj_description(oid, 'pg_tablespace') "
"FROM pg_catalog.pg_tablespace "
"WHERE spcname !~ '^pg_' "
"ORDER BY 1");
- else
+ else
res = executeQuery(conn, "SELECT spcname, "
- "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
+ "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
"spclocation, spcacl, "
"null "
"FROM pg_catalog.pg_tablespace "
exit(1);
}
- if (spccomment && strlen(spccomment)) {
+ if (spccomment && strlen(spccomment))
+ {
appendPQExpBuffer(buf, "COMMENT ON TABLESPACE %s IS ", fspcname);
appendStringLiteralConn(buf, spccomment, conn);
appendPQExpBuffer(buf, ";\n");
appendStringLiteralConn(buf, dbencoding, conn);
/*
- * Output tablespace if it isn't the default. For default, it
- * uses the default from the template database. If tablespace
- * is specified and tablespace creation failed earlier,
- * (e.g. no such directory), the database creation will fail
- * too. One solution would be to use 'SET default_tablespace'
- * like we do in pg_dump for setting non-default database
- * locations.
+ * Output tablespace if it isn't the default. For default, it
+ * uses the default from the template database. If tablespace is
+ * specified and tablespace creation failed earlier, (e.g. no such
+ * directory), the database creation will fail too. One solution
+ * would be to use 'SET default_tablespace' like we do in pg_dump
+ * for setting non-default database locations.
*/
if (strcmp(dbtablespace, "pg_default") != 0)
appendPQExpBuffer(buf, " TABLESPACE = %s",
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.52 2006/08/21 16:16:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.53 2006/10/04 00:30:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
RewriteControlFile(void)
{
int fd;
- char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
+ char buffer[PG_CONTROL_SIZE]; /* need not be aligned */
/*
* Adjust fields as needed to force an empty XLOG starting at the next
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.172 2006/08/29 15:19:50 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.173 2006/10/04 00:30:05 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
-#ifdef __BORLANDC__ /* needed for BCC */
+#ifdef __BORLANDC__ /* needed for BCC */
#undef mkdir
#endif
static char *
read_connect_arg(PsqlScanState scan_state)
{
- char *result;
- char quote;
+ char *result;
+ char quote;
/*
- * Ideally we should treat the arguments as SQL identifiers. But
- * for backwards compatibility with 7.2 and older pg_dump files,
- * we have to take unquoted arguments verbatim (don't downcase
- * them). For now, double-quoted arguments may be stripped of
- * double quotes (as if SQL identifiers). By 7.4 or so, pg_dump
- * files can be expected to double-quote all mixed-case \connect
- * arguments, and then we can get rid of OT_SQLIDHACK.
+ * Ideally we should treat the arguments as SQL identifiers. But for
+ * backwards compatibility with 7.2 and older pg_dump files, we have to
+ * take unquoted arguments verbatim (don't downcase them). For now,
+ * double-quoted arguments may be stripped of double quotes (as if SQL
+ * identifiers). By 7.4 or so, pg_dump files can be expected to
+ * double-quote all mixed-case \connect arguments, and then we can get rid
+ * of OT_SQLIDHACK.
*/
result = psql_scan_slash_option(scan_state, OT_SQLIDHACK, &quote, true);

return result;
}
-
+
/*
* Subroutine to actually try to execute a backslash command.
*
* \c dbname user host port
*
- * If any of these parameters are omitted or specified as '-', the
- * current value of the parameter will be used instead. If the
- * parameter has no current value, the default value for that
- * parameter will be used. Some examples:
+ * If any of these parameters are omitted or specified as '-', the current
+ * value of the parameter will be used instead. If the parameter has no
+ * current value, the default value for that parameter will be used. Some
+ * examples:
*
- * \c - - hst Connect to current database on current port of
- * host "hst" as current user.
- * \c - usr - prt Connect to current database on "prt" port of current
- * host as user "usr".
- * \c dbs Connect to "dbs" database on current port of current
- * host as current user.
+ * \c - - hst Connect to current database on current port of host
+ * "hst" as current user. \c - usr - prt Connect to current database on
+ * "prt" port of current host as user "usr". \c dbs Connect to
+ * "dbs" database on current port of current host as current user.
*/
else if (strcmp(cmd, "c") == 0 || strcmp(cmd, "connect") == 0)
{
static char *
prompt_for_password(const char *username)
{
- char *result;
+ char *result;
if (username == NULL)
result = simple_prompt("Password: ", 100, false);
else
{
- char *prompt_text;
+ char *prompt_text;
prompt_text = malloc(strlen(username) + 32);
sprintf(prompt_text, "Password for user \"%s\": ", username);
static bool
do_connect(char *dbname, char *user, char *host, char *port)
{
- PGconn *o_conn = pset.db,
- *n_conn;
- char *password = NULL;
+ PGconn *o_conn = pset.db,
+ *n_conn;
+ char *password = NULL;
if (!dbname)
dbname = PQdb(o_conn);
port = PQport(o_conn);
/*
- * If the user asked to be prompted for a password, ask for one
- * now. If not, use the password from the old connection, provided
- * the username has not changed. Otherwise, try to connect without
- * a password first, and then ask for a password if we got the
- * appropriate error message.
+ * If the user asked to be prompted for a password, ask for one now. If
+ * not, use the password from the old connection, provided the username
+ * has not changed. Otherwise, try to connect without a password first,
+ * and then ask for a password if we got the appropriate error message.
*
- * XXX: this behavior is broken. It leads to spurious connection
- * attempts in the postmaster's log, and doing a string comparison
- * against the returned error message is pretty fragile.
+ * XXX: this behavior is broken. It leads to spurious connection attempts
+ * in the postmaster's log, and doing a string comparison against the
+ * returned error message is pretty fragile.
*/
if (pset.getPassword)
{
break;
/*
- * Connection attempt failed; either retry the connection
- * attempt with a new password, or give up.
+ * Connection attempt failed; either retry the connection attempt with
+ * a new password, or give up.
*/
if (strcmp(PQerrorMessage(n_conn), PQnoPasswordSupplied) == 0)
{
}
/*
- * Failed to connect to the database. In interactive mode,
- * keep the previous connection to the DB; in scripting mode,
- * close our previous connection as well.
+ * Failed to connect to the database. In interactive mode, keep the
+ * previous connection to the DB; in scripting mode, close our
+ * previous connection as well.
*/
if (pset.cur_cmd_interactive)
{
FILE *fd;
int result;
char *oldfilename;
- PGresult *res;
+ PGresult *res;
if (!filename)
return EXIT_FAILURE;
oldfilename = pset.inputfile;
pset.inputfile = filename;
- if (single_txn)
- res = PSQLexec("BEGIN", false);
+ if (single_txn)
+ res = PSQLexec("BEGIN", false);
result = MainLoop(fd);
- if (single_txn)
- res = PSQLexec("COMMIT", false);
+ if (single_txn)
+ res = PSQLexec("COMMIT", false);
fclose(fd);
pset.inputfile = oldfilename;
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.h,v 1.27 2006/07/18 17:42:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.h,v 1.28 2006/10/04 00:30:05 momjian Exp $
*/
#ifndef COMMAND_H
#define COMMAND_H
typedef enum _backslashResult
{
- PSQL_CMD_UNKNOWN = 0, /* not done parsing yet (internal only) */
- PSQL_CMD_SEND, /* query complete; send off */
- PSQL_CMD_SKIP_LINE, /* keep building query */
- PSQL_CMD_TERMINATE, /* quit program */
- PSQL_CMD_NEWEDIT, /* query buffer was changed (e.g., via \e) */
- PSQL_CMD_ERROR /* the execution of the backslash command
+ PSQL_CMD_UNKNOWN = 0, /* not done parsing yet (internal only) */
+ PSQL_CMD_SEND, /* query complete; send off */
+ PSQL_CMD_SKIP_LINE, /* keep building query */
+ PSQL_CMD_TERMINATE, /* quit program */
+ PSQL_CMD_NEWEDIT, /* query buffer was changed (e.g., via \e) */
+ PSQL_CMD_ERROR /* the execution of the backslash command
* resulted in an error */
} backslashResult;
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.129 2006/09/27 15:41:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.130 2006/10/04 00:30:05 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
sigjmp_buf sigint_interrupt_jmp;
-static PGcancel * volatile cancelConn = NULL;
+static PGcancel *volatile cancelConn = NULL;
#ifdef WIN32
static CRITICAL_SECTION cancelConnLock;
{
pqsignal(SIGINT, handle_sigint);
}
-
#else /* WIN32 */
static BOOL WINAPI
SetConsoleCtrlHandler(consoleHandler, TRUE);
}
-
#endif /* WIN32 */
void
SetCancelConn(void)
{
- PGcancel *oldCancelConn;
+ PGcancel *oldCancelConn;
#ifdef WIN32
EnterCriticalSection(&cancelConnLock);
void
ResetCancelConn(void)
{
- PGcancel *oldCancelConn;
+ PGcancel *oldCancelConn;
#ifdef WIN32
EnterCriticalSection(&cancelConnLock);
{
/* Default fetch-it-all-and-print mode */
TimevalStruct before,
- after;
+ after;
if (pset.timing)
GETTIMEOFDAY(&before);
ExecQueryUsingCursor(const char *query, double *elapsed_msec)
{
bool OK = true;
- PGresult *results;
- PQExpBufferData buf;
+ PGresult *results;
+ PQExpBufferData buf;
printQueryOpt my_popt = pset.popt;
FILE *queryFout_copy = pset.queryFout;
bool queryFoutPipe_copy = pset.queryFoutPipe;
- bool started_txn = false;
- bool did_pager = false;
- int ntuples;
- char fetch_cmd[64];
+ bool started_txn = false;
+ bool did_pager = false;
+ int ntuples;
+ char fetch_cmd[64];
TimevalStruct before,
- after;
+ after;
*elapsed_msec = 0;
if (pset.gfname)
{
/* keep this code in sync with PrintQueryTuples */
- pset.queryFout = stdout; /* so it doesn't get closed */
+ pset.queryFout = stdout; /* so it doesn't get closed */
/* open file/pipe */
if (!setQFout(pset.gfname))
GETTIMEOFDAY(&before);
/*
- * We try to close the cursor on either success or failure, but on
- * failure ignore the result (it's probably just a bleat about
- * being in an aborted transaction)
+ * We try to close the cursor on either success or failure, but on failure
+ * ignore the result (it's probably just a bleat about being in an aborted
+ * transaction)
*/
results = PQexec(pset.db, "CLOSE _psql_cursor");
if (OK)
}
/*
- * Note: these tests will match DROP SYSTEM and REINDEX TABLESPACE,
- * which aren't really valid commands so we don't care much.
- * The other four possible matches are correct.
+ * Note: these tests will match DROP SYSTEM and REINDEX TABLESPACE, which
+ * aren't really valid commands so we don't care much. The other four
+ * possible matches are correct.
*/
if ((wordlen == 4 && pg_strncasecmp(query, "drop", 4) == 0) ||
(wordlen == 7 && pg_strncasecmp(query, "reindex", 7) == 0))
if (wordlen == 6 && pg_strncasecmp(query, "select", 6) == 0)
return true;
-
+
if (wordlen == 6 && pg_strncasecmp(query, "values", 6) == 0)
return true;
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.h,v 1.50 2006/06/14 16:49:02 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.h,v 1.51 2006/10/04 00:30:05 momjian Exp $
*/
#ifndef COMMON_H
#define COMMON_H
extern sigjmp_buf sigint_interrupt_jmp;
extern volatile bool cancel_pressed;
+
/* Note: cancel_pressed is defined in print.c, see that file for reasons */
extern void setup_cancel_handler(void);
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.68 2006/08/30 23:34:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.69 2006/10/04 00:30:05 momjian Exp $
*/
#include "postgres_fe.h"
#include "copy.h"
/* Handle COPY (SELECT) case */
if (token[0] == '(')
{
- int parens = 1;
+ int parens = 1;
while (parens > 0)
{
nonstd_backslash, true, false, pset.encoding);
if (token && pg_strcasecmp(token, "as") == 0)
token = strtokx(NULL, whitespace, NULL, "'",
- nonstd_backslash, true, false, pset.encoding);
+ nonstd_backslash, true, false, pset.encoding);
if (token)
result->delim = pg_strdup(token);
else
nonstd_backslash, true, false, pset.encoding);
if (token && pg_strcasecmp(token, "as") == 0)
token = strtokx(NULL, whitespace, NULL, "'",
- nonstd_backslash, true, false, pset.encoding);
+ nonstd_backslash, true, false, pset.encoding);
if (token)
result->null = pg_strdup(token);
else
nonstd_backslash, true, false, pset.encoding);
if (token && pg_strcasecmp(token, "as") == 0)
token = strtokx(NULL, whitespace, NULL, "'",
- nonstd_backslash, true, false, pset.encoding);
+ nonstd_backslash, true, false, pset.encoding);
if (token)
result->quote = pg_strdup(token);
else
nonstd_backslash, true, false, pset.encoding);
if (token && pg_strcasecmp(token, "as") == 0)
token = strtokx(NULL, whitespace, NULL, "'",
- nonstd_backslash, true, false, pset.encoding);
+ nonstd_backslash, true, false, pset.encoding);
if (token)
result->escape = pg_strdup(token);
else
/*
- * Handle one of the "string" options of COPY. If the user gave a quoted
+ * Handle one of the "string" options of COPY. If the user gave a quoted
* string, pass it to the backend as-is; if it wasn't quoted then quote
* and escape it.
*/
bool
handleCopyOut(PGconn *conn, FILE *copystream)
{
- bool OK = true;
- char *buf;
- int ret;
- PGresult *res;
+ bool OK = true;
+ char *buf;
+ int ret;
+ PGresult *res;
for (;;)
{
OK = false;
}
PQclear(res);
-
+
return OK;
}
bool OK;
const char *prompt;
char buf[COPYBUFSIZ];
- PGresult *res;
+ PGresult *res;
/*
- * Establish longjmp destination for exiting from wait-for-input.
- * (This is only effective while sigint_interrupt_enabled is TRUE.)
+ * Establish longjmp destination for exiting from wait-for-input. (This is
+ * only effective while sigint_interrupt_enabled is TRUE.)
*/
if (sigsetjmp(sigint_interrupt_jmp, 1) != 0)
{
for (;;)
{
- int buflen;
+ int buflen;
/* enable longjmp while waiting for input */
sigint_interrupt_enabled = true;
bool copydone = false;
while (!copydone)
- { /* for each input line ... */
+ { /* for each input line ... */
bool firstload;
bool linedone;
fputs(prompt, stdout);
fflush(stdout);
}
-
+
firstload = true;
linedone = false;
while (!linedone)
- { /* for each bufferload in line ... */
- int linelen;
- char *fgresult;
+ { /* for each bufferload in line ... */
+ int linelen;
+ char *fgresult;
/* enable longjmp while waiting for input */
sigint_interrupt_enabled = true;
linelen = strlen(buf);
/* current line is done? */
- if (linelen > 0 && buf[linelen-1] == '\n')
+ if (linelen > 0 && buf[linelen - 1] == '\n')
linedone = true;
/* check for EOF marker, but not on a partial line */
copydone = true;
break;
}
-
+
firstload = false;
}
-
+
if (PQputCopyData(conn, buf, linelen) <= 0)
{
OK = false;
break;
}
}
-
+
pset.lineno++;
}
}
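
A compact, hedged view of the same libpq COPY-in protocol is sketched below (an assumed helper, not psql code): push a buffer with PQputCopyData(), finish with PQputCopyEnd(), then drain the final result.

/*
 * Illustration only (assumed helper): send one buffer on an open COPY IN
 * connection, terminate the COPY, and collect the command result.
 */
static bool
copy_in_one_buffer(PGconn *conn, const char *buf, int len)
{
	PGresult   *res;
	bool		OK = true;

	if (PQputCopyData(conn, buf, len) <= 0 ||
		PQputCopyEnd(conn, NULL) <= 0)
		OK = false;

	while ((res = PQgetResult(conn)) != NULL)
	{
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
			OK = false;
		PQclear(res);
	}
	return OK;
}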
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.144 2006/08/29 15:19:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.145 2006/10/04 00:30:05 momjian Exp $
*/
#include "postgres_fe.h"
#include "describe.h"
" ELSE\n"
" pg_catalog.array_to_string(ARRAY(\n"
" SELECT\n"
- " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
+ " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
" FROM\n"
" pg_catalog.generate_series(0, pg_catalog.array_upper(p.proargtypes, 1)) AS s(i)\n"
" ), ', ')\n"
if (verbose)
appendPQExpBuffer(&buf,
",\n spcacl as \"%s\""
- ",\n pg_catalog.shobj_description(oid, 'pg_tablespace') AS \"%s\"",
+ ",\n pg_catalog.shobj_description(oid, 'pg_tablespace') AS \"%s\"",
_("Access privileges"), _("Description"));
appendPQExpBuffer(&buf,
"SELECT n.nspname as \"%s\",\n"
" p.proname as \"%s\",\n"
" CASE WHEN p.proretset THEN 'setof ' ELSE '' END ||\n"
- " pg_catalog.format_type(p.prorettype, NULL) as \"%s\",\n"
+ " pg_catalog.format_type(p.prorettype, NULL) as \"%s\",\n"
" CASE WHEN proallargtypes IS NOT NULL THEN\n"
" pg_catalog.array_to_string(ARRAY(\n"
" SELECT\n"
" CASE\n"
" WHEN p.proargmodes[s.i] = 'i' THEN ''\n"
" WHEN p.proargmodes[s.i] = 'o' THEN 'OUT '\n"
- " WHEN p.proargmodes[s.i] = 'b' THEN 'INOUT '\n"
+ " WHEN p.proargmodes[s.i] = 'b' THEN 'INOUT '\n"
" END ||\n"
" CASE\n"
- " WHEN COALESCE(p.proargnames[s.i], '') = '' THEN ''\n"
+ " WHEN COALESCE(p.proargnames[s.i], '') = '' THEN ''\n"
" ELSE p.proargnames[s.i] || ' ' \n"
" END ||\n"
- " pg_catalog.format_type(p.proallargtypes[s.i], NULL)\n"
+ " pg_catalog.format_type(p.proallargtypes[s.i], NULL)\n"
" FROM\n"
" pg_catalog.generate_series(1, pg_catalog.array_upper(p.proallargtypes, 1)) AS s(i)\n"
" ), ', ')\n"
" pg_catalog.array_to_string(ARRAY(\n"
" SELECT\n"
" CASE\n"
- " WHEN COALESCE(p.proargnames[s.i+1], '') = '' THEN ''\n"
+ " WHEN COALESCE(p.proargnames[s.i+1], '') = '' THEN ''\n"
" ELSE p.proargnames[s.i+1] || ' '\n"
" END ||\n"
- " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
+ " pg_catalog.format_type(p.proargtypes[s.i], NULL)\n"
" FROM\n"
" pg_catalog.generate_series(0, pg_catalog.array_upper(p.proargtypes, 1)) AS s(i)\n"
" ), ', ')\n"
"\nFROM pg_catalog.pg_proc p"
"\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace"
"\n LEFT JOIN pg_catalog.pg_language l ON l.oid = p.prolang"
- "\n JOIN pg_catalog.pg_roles r ON r.oid = p.proowner\n");
+ "\n JOIN pg_catalog.pg_roles r ON r.oid = p.proowner\n");
/*
* we skip in/out funcs by excluding functions that take or return cstring
}
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_database d"
- "\n JOIN pg_catalog.pg_roles r ON d.datdba = r.oid\n");
+ "\n JOIN pg_catalog.pg_roles r ON d.datdba = r.oid\n");
if (verbose)
appendPQExpBuffer(&buf,
- " JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid\n");
- appendPQExpBuffer(&buf,"ORDER BY 1;");
+ " JOIN pg_catalog.pg_tablespace t on d.dattablespace = t.oid\n");
+ appendPQExpBuffer(&buf, "ORDER BY 1;");
res = PSQLexec(buf.data, false);
termPQExpBuffer(&buf);
if (!res)
if (verbose)
appendPQExpBuffer(&buf, "\n, pg_catalog.shobj_description(r.oid, 'pg_authid') AS \"%s\"",
- _("Description"));
+ _("Description"));
appendPQExpBuffer(&buf, "\nFROM pg_catalog.pg_roles r\n");
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_class c"
- "\n JOIN pg_catalog.pg_roles r ON r.oid = c.relowner"
+ "\n JOIN pg_catalog.pg_roles r ON r.oid = c.relowner"
"\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace");
if (showIndexes)
appendPQExpBuffer(&buf,
_("Access privileges"), _("Description"));
appendPQExpBuffer(&buf,
- "\nFROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_roles r\n"
+ "\nFROM pg_catalog.pg_namespace n JOIN pg_catalog.pg_roles r\n"
" ON n.nspowner=r.oid\n"
"WHERE (n.nspname !~ '^pg_temp_' OR\n"
" n.nspname = (pg_catalog.current_schemas(true))[1])\n"); /* temp schema is first */
* we assume this was NOT done by scan_option. Also, adjust shell-style
* wildcard characters into regexp notation.
*
- * Note: the result of this pass is the actual regexp pattern we want
- * to execute. Quoting/escaping it into a SQL literal will be done below.
+ * Note: the result of this pass is the actual regexp pattern we want to
+ * execute. Quoting/escaping it into a SQL literal will be done below.
*/
appendPQExpBufferChar(&namebuf, '^');
}
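The comment above describes turning a shell-style object pattern into the regexp that will actually be executed (quoting it into a SQL literal happens afterwards). A simplified sketch of that translation, assuming only '*' and '?' wildcards and ignoring quoting and multibyte handling; the helper name is illustrative:

#include <stddef.h>

/*
 * Sketch: convert a shell-style pattern into an anchored regexp,
 * mapping '*' -> ".*" and '?' -> "." and copying everything else
 * literally.  Real code must also escape regexp metacharacters and
 * cope with quoting and multibyte encodings; this ignores all that.
 */
static void
wildcard_to_regexp(const char *pattern, char *out, size_t outlen)
{
	size_t		n = 0;

	if (outlen < 4)				/* too small to hold "^...$" sensibly */
	{
		if (outlen > 0)
			out[0] = '\0';
		return;
	}
	out[n++] = '^';				/* leading anchor, as noted above */
	for (; *pattern && n < outlen - 3; pattern++)
	{
		if (*pattern == '*')
		{
			out[n++] = '.';
			out[n++] = '*';
		}
		else if (*pattern == '?')
			out[n++] = '.';
		else
			out[n++] = *pattern;
	}
	out[n++] = '$';				/* trailing anchor */
	out[n] = '\0';
}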
/*
- * Now decide what we need to emit. Note there will be a leading '^'
- * in the patterns in any case.
+ * Now decide what we need to emit. Note there will be a leading '^' in
+ * the patterns in any case.
*/
if (namebuf.len > 1)
{
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.59 2006/08/29 15:19:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.60 2006/10/04 00:30:06 momjian Exp $
*/
#include "postgres_fe.h"
* Preserve newlines in saved queries by mapping '\n' to NL_IN_HISTORY
*
* It is assumed NL_IN_HISTORY will never be entered by the user
- * nor appear inside a multi-byte string. 0x00 is not properly
+ * nor appear inside a multi-byte string. 0x00 is not properly
* handled by the readline routines so it can not be used
* for this purpose.
*/
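A compact sketch of the newline round trip described above; the concrete sentinel byte (0x01 here) is an assumption for illustration, the real value comes from psql's own definition of NL_IN_HISTORY:

#define NL_IN_HISTORY	0x01	/* assumed sentinel; psql defines its own */

/* Replace newlines with the sentinel before handing the entry to readline. */
static void
encode_entry(char *s)
{
	for (; *s; s++)
		if (*s == '\n')
			*s = NL_IN_HISTORY;
}

/* Restore newlines after reading the entry back from the history file. */
static void
decode_entry(char *s)
{
	for (; *s; s++)
		if (*s == NL_IN_HISTORY)
			*s = '\n';
}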
#ifdef USE_READLINE
if (useReadline)
{
- char *result;
+ char *result;
/* Enable SIGINT to longjmp to sigint_interrupt_jmp */
sigint_interrupt_enabled = true;
appendPQExpBufferStr(history_buf, s);
if (s[strlen(s) - 1] != '\n')
appendPQExpBufferChar(history_buf, '\n');
- }
-#endif
+ }
+#endif
}
#ifdef USE_READLINE
static char *prev_hist = NULL;
- char *s = history_buf->data;
+ char *s = history_buf->data;
if (useHistory && s[0])
{
}
else
{
- int i;
+ int i;
/* Trim any trailing \n's (OK to scribble on history_buf) */
- for (i = strlen(s)-1; i >= 0 && s[i] == '\n'; i--)
+ for (i = strlen(s) - 1; i >= 0 && s[i] == '\n'; i--)
;
s[i + 1] = '\0';
/* Save each previous line for ignoredups processing */
*
* Caller *must* have set up sigint_interrupt_jmp before calling.
*
- * Note: we re-use a static PQExpBuffer for each call. This is to avoid
+ * Note: we re-use a static PQExpBuffer for each call. This is to avoid
* leaking memory if interrupted by SIGINT.
*/
char *
for (;;)
{
- char *result;
+ char *result;
/* Enable SIGINT to longjmp to sigint_interrupt_jmp */
sigint_interrupt_enabled = true;
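The "static PQExpBuffer" note above is about interrupt safety: because SIGINT can longjmp out of the wait at any moment, the result buffer is allocated once and merely reset on each call, so an interrupt cannot leak it. A hedged sketch of that idiom using libpq's pqexpbuffer API (the function name is illustrative, not psql's):

#include "postgres_fe.h"
#include "pqexpbuffer.h"

static char *
collect_line(const char *chunk)
{
	static PQExpBuffer buf = NULL;

	/* allocate once; on later calls just reset, so a longjmp cannot leak it */
	if (buf == NULL)
		buf = createPQExpBuffer();
	else
		resetPQExpBuffer(buf);

	appendPQExpBufferStr(buf, chunk);
	return buf->data;			/* valid until the next call */
}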
encode_history(void)
{
HIST_ENTRY *cur_hist;
- char *cur_ptr;
+ char *cur_ptr;
history_set_pos(0);
for (cur_hist = current_history(); cur_hist; cur_hist = next_history())
decode_history(void)
{
HIST_ENTRY *cur_hist;
- char *cur_ptr;
+ char *cur_ptr;
history_set_pos(0);
for (cur_hist = current_history(); cur_hist; cur_hist = next_history())
*cur_ptr = '\n';
}
}
-#endif /* USE_READLINE */
+#endif /* USE_READLINE */
/*
/*
- * This function is for saving the readline history when user
- * runs \s command or when psql finishes.
+ * This function is for saving the readline history when user
+ * runs \s command or when psql finishes.
*
* We have an argument named encodeFlag to handle the cases differently.
* In case of call via \s we don't really need to encode \n as \x01,
saveHistory(char *fname, bool encodeFlag)
{
#ifdef USE_READLINE
+
/*
- * Suppressing the write attempt when HISTFILE is set to /dev/null
- * may look like a negligible optimization, but it's necessary on e.g.
- * Darwin, where write_history will fail because it tries to chmod
- * the target file.
+ * Suppressing the write attempt when HISTFILE is set to /dev/null may
+ * look like a negligible optimization, but it's necessary on e.g. Darwin,
+ * where write_history will fail because it tries to chmod the target
+ * file.
*/
if (useHistory && fname &&
strcmp(fname, DEVNULL) != 0)
/*
* return value of write_history is not standardized across GNU
- * readline and libedit. Therefore, check for errno becoming set
- * to see if the write failed.
+ * readline and libedit. Therefore, check for errno becoming set to
+ * see if the write failed.
*/
errno = 0;
(void) write_history(fname);
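Because GNU readline and libedit disagree on write_history()'s return value, the portable test shown above is "clear errno, make the call, then look at errno". The same idiom in isolation, as a hedged sketch:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <readline/history.h>	/* write_history() */

/* Returns 1 on success.  Trusts errno, not the non-portable return value. */
static int
save_history_portably(const char *fname)
{
	errno = 0;
	(void) write_history(fname);
	if (errno != 0)
	{
		fprintf(stderr, "could not save history to file \"%s\": %s\n",
				fname, strerror(errno));
		return 0;
	}
	return 1;
}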
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.83 2006/08/29 15:19:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.84 2006/10/04 00:30:06 momjian Exp $
*/
#include "postgres_fe.h"
#include "mainloop.h"
PQExpBuffer query_buf; /* buffer for query being accumulated */
PQExpBuffer previous_buf; /* if there isn't anything in the new buffer
* yet, use this one for \e, etc. */
- PQExpBuffer history_buf; /* earlier lines of a multi-line command,
- * not yet saved to readline history */
+ PQExpBuffer history_buf; /* earlier lines of a multi-line command, not
+ * yet saved to readline history */
char *line; /* current line of input */
int added_nl_pos;
bool success;
}
/*
- * Establish longjmp destination for exiting from wait-for-input.
- * We must re-do this each time through the loop for safety, since
- * the jmpbuf might get changed during command execution.
+ * Establish longjmp destination for exiting from wait-for-input. We
+ * must re-do this each time through the loop for safety, since the
+ * jmpbuf might get changed during command execution.
*/
if (sigsetjmp(sigint_interrupt_jmp, 1) != 0)
{
* If we added a newline to query_buf, and nothing else has
* been inserted in query_buf by the lexer, then strip off the
* newline again. This avoids any change to query_buf when a
- * line contains only a backslash command. Also, in this
+ * line contains only a backslash command. Also, in this
* situation we force out any previous lines as a separate
* history entry; we don't want SQL and backslash commands
* intermixed in history if at all possible.
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/mbprint.c,v 1.22 2006/07/14 14:52:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mbprint.c,v 1.23 2006/10/04 00:30:06 momjian Exp $
*/
#include "postgres_fe.h"
int
pg_wcswidth(const unsigned char *pwcs, size_t len, int encoding)
{
- int width = 0;
+ int width = 0;
while (len > 0)
{
- int chlen, chwidth;
+ int chlen,
+ chwidth;
- chlen = PQmblen((const char*) pwcs, encoding);
+ chlen = PQmblen((const char *) pwcs, encoding);
if (chlen > len)
- break; /* Invalid string */
-
+ break; /* Invalid string */
+
chwidth = PQdsplen((const char *) pwcs, encoding);
-
+
if (chwidth > 0)
width += chwidth;
pwcs += chlen;
/*
* pg_wcssize takes the given string in the given encoding and returns three
* values:
- * result_width: Width in display character of longest line in string
- * result_hieght: Number of lines in display output
- * result_format_size: Number of bytes required to store formatted representation of string
+ *	 result_width: Width in display characters of the longest line in string
+ *	 result_height: Number of lines in display output
+ *	 result_format_size: Number of bytes required to store formatted representation of string

*/
int
pg_wcssize(unsigned char *pwcs, size_t len, int encoding, int *result_width,
- int *result_height, int *result_format_size)
+ int *result_height, int *result_format_size)
{
- int w,
- chlen = 0,
- linewidth = 0;
- int width = 0;
- int height = 1;
- int format_size = 0;
+ int w,
+ chlen = 0,
+ linewidth = 0;
+ int width = 0;
+ int height = 1;
+ int format_size = 0;
for (; *pwcs && len > 0; pwcs += chlen)
{
chlen = PQmblen((char *) pwcs, encoding);
- if (len < (size_t)chlen)
+ if (len < (size_t) chlen)
break;
w = PQdsplen((char *) pwcs, encoding);
- if (chlen == 1) /* ASCII char */
+ if (chlen == 1) /* ASCII char */
{
- if (*pwcs == '\n') /* Newline */
+ if (*pwcs == '\n') /* Newline */
{
if (linewidth > width)
width = linewidth;
linewidth = 0;
height += 1;
- format_size += 1; /* For NUL char */
+ format_size += 1; /* For NUL char */
}
- else if (*pwcs == '\r') /* Linefeed */
+ else if (*pwcs == '\r') /* Linefeed */
{
linewidth += 2;
format_size += 2;
}
- else if (w <= 0) /* Other control char */
+ else if (w <= 0) /* Other control char */
{
linewidth += 4;
format_size += 4;
}
- else /* Output itself */
+ else
+ /* Output itself */
{
linewidth++;
format_size += 1;
}
}
- else if (w <= 0) /* Non-ascii control char */
+ else if (w <= 0) /* Non-ascii control char */
{
- linewidth += 6; /* \u0000 */
+ linewidth += 6; /* \u0000 */
format_size += 6;
}
- else /* All other chars */
+ else
+ /* All other chars */
{
linewidth += w;
format_size += chlen;
if (linewidth > width)
width = linewidth;
format_size += 1;
-
+
/* Set results */
if (result_width)
*result_width = width;
*result_height = height;
if (result_format_size)
*result_format_size = format_size;
-
+
return width;
}
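Taken together with the struct lineptr declaration in mbprint.h further down, the usual calling pattern is: measure with pg_wcssize(), allocate one lineptr per display line plus a shared data area, then let pg_wcsformat() fill both. A sketch of that sequence, mirroring the print.c callers later in this patch (error handling omitted; the wrapper name is illustrative):

#include <stdlib.h>
#include <string.h>
#include "mbprint.h"			/* pg_wcssize(), pg_wcsformat(), struct lineptr */

static struct lineptr *
format_cell(unsigned char *value, int encoding, int *nlines)
{
	int			width,
				height,
				fsize;
	struct lineptr *lines;

	pg_wcssize(value, strlen((char *) value), encoding,
			   &width, &height, &fsize);

	lines = calloc(height, sizeof(*lines));
	lines->ptr = malloc(fsize);	/* shared data area; lines[i].ptr points into it */

	pg_wcsformat(value, strlen((char *) value), encoding, lines, height);

	*nlines = height;
	return lines;
}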
void
pg_wcsformat(unsigned char *pwcs, size_t len, int encoding,
- struct lineptr *lines, int count)
+ struct lineptr * lines, int count)
{
int w,
chlen = 0;
- int linewidth = 0;
- unsigned char *ptr = lines->ptr; /* Pointer to data area */
+ int linewidth = 0;
+ unsigned char *ptr = lines->ptr; /* Pointer to data area */
for (; *pwcs && len > 0; pwcs += chlen)
{
- chlen = PQmblen((char *) pwcs,encoding);
- if (len < (size_t)chlen)
+ chlen = PQmblen((char *) pwcs, encoding);
+ if (len < (size_t) chlen)
break;
- w = PQdsplen((char *) pwcs,encoding);
+ w = PQdsplen((char *) pwcs, encoding);
- if (chlen == 1) /* single byte char char */
+		if (chlen == 1)			/* single-byte char */
{
- if (*pwcs == '\n') /* Newline */
+ if (*pwcs == '\n') /* Newline */
{
- *ptr++ = 0; /* NULL char */
+ *ptr++ = 0; /* NULL char */
lines->width = linewidth;
linewidth = 0;
lines++;
count--;
if (count == 0)
- exit(1); /* Screwup */
-
+ exit(1); /* Screwup */
+
lines->ptr = ptr;
}
- else if (*pwcs == '\r') /* Linefeed */
+ else if (*pwcs == '\r') /* Linefeed */
{
strcpy((char *) ptr, "\\r");
linewidth += 2;
ptr += 2;
}
- else if (w <= 0) /* Other control char */
+ else if (w <= 0) /* Other control char */
{
sprintf((char *) ptr, "\\x%02X", *pwcs);
linewidth += 4;
ptr += 4;
}
- else /* Output itself */
+ else
+ /* Output itself */
{
linewidth++;
*ptr++ = *pwcs;
}
}
- else if (w <= 0) /* Non-ascii control char */
+ else if (w <= 0) /* Non-ascii control char */
{
if (encoding == PG_UTF8)
sprintf((char *) ptr, "\\u%04X", utf2ucs(pwcs));
else
- /* This case cannot happen in the current
- * code because only UTF-8 signals multibyte
- * control characters. But we may need to
- * support it at some stage */
+
+ /*
+ * This case cannot happen in the current code because only
+ * UTF-8 signals multibyte control characters. But we may need
+ * to support it at some stage
+ */
sprintf((char *) ptr, "\\u????");
-
+
ptr += 6;
linewidth += 6;
}
- else /* All other chars */
+ else
+ /* All other chars */
{
- int i;
- for (i=0; i < chlen; i++)
+ int i;
+
+ for (i = 0; i < chlen; i++)
*ptr++ = pwcs[i];
linewidth += w;
}
-/* $PostgreSQL: pgsql/src/bin/psql/mbprint.h,v 1.10 2006/07/14 14:52:26 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/bin/psql/mbprint.h,v 1.11 2006/10/04 00:30:06 momjian Exp $ */
#ifndef MBPRINT_H
#define MBPRINT_H
-struct lineptr {
+struct lineptr
+{
unsigned char *ptr;
- int width;
+ int width;
};
extern unsigned char *mbvalidate(unsigned char *pwcs, int encoding);
extern int pg_wcswidth(const unsigned char *pwcs, size_t len, int encoding);
-extern void pg_wcsformat(unsigned char *pwcs, size_t len, int encoding, struct lineptr *lines, int count);
+extern void pg_wcsformat(unsigned char *pwcs, size_t len, int encoding, struct lineptr * lines, int count);
extern int pg_wcssize(unsigned char *pwcs, size_t len, int encoding, int *width, int *height, int *format_size);
#endif /* MBPRINT_H */
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.89 2006/08/29 22:25:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.90 2006/10/04 00:30:06 momjian Exp $
*
* Note: we include postgres.h not postgres_fe.h so that we can include
* catalog/pg_type.h, and thereby have access to INT4OID and similar macros.
{
const char *opt_fieldsep = opt->fieldSep;
const char *opt_recordsep = opt->recordSep;
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned int col_count = 0;
unsigned int i;
const char *const * ptr;
need_recordsep = true;
}
}
- else /* assume continuing printout */
+ else
+ /* assume continuing printout */
need_recordsep = true;
/* print cells */
{
const char *opt_fieldsep = opt->fieldSep;
const char *opt_recordsep = opt->recordSep;
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned int col_count = 0;
unsigned int i;
const char *const * ptr;
need_recordsep = true;
}
}
- else /* assume continuing printout */
+ else
+ /* assume continuing printout */
need_recordsep = true;
/* print records */
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
- int encoding = opt->encoding;
+ int encoding = opt->encoding;
unsigned int col_count = 0;
unsigned int cell_count = 0;
unsigned int i;
- int tmp;
+ int tmp;
unsigned int *widths,
total_w;
unsigned int *heights;
unsigned int *format_space;
unsigned char **format_buf;
-
- const char *const *ptr;
-
- struct lineptr **col_lineptrs; /* pointers to line pointer for each column */
- struct lineptr *lineptr_list; /* complete list of linepointers */
-
- int *complete; /* Array remembering which columns have completed output */
+
+ const char *const * ptr;
+
+ struct lineptr **col_lineptrs; /* pointers to line pointer for each
+ * column */
+ struct lineptr *lineptr_list; /* complete list of linepointers */
+
+ int *complete; /* Array remembering which columns have
+ * completed output */
if (cancel_pressed)
return;
format_buf = NULL;
complete = NULL;
}
-
+
/* count cells (rows * cols) */
for (ptr = cells; *ptr; ptr++)
cell_count++;
for (i = 0; i < col_count; i++)
{
/* Get width & height */
- int height, space;
+ int height,
+ space;
+
pg_wcssize((unsigned char *) headers[i], strlen(headers[i]), encoding, &tmp, &height, &space);
if (tmp > widths[i])
widths[i] = tmp;
for (i = 0, ptr = cells; *ptr; ptr++, i++)
{
- int numeric_locale_len;
- int height, space;
+ int numeric_locale_len;
+ int height,
+ space;
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
numeric_locale_len = additional_numeric_locale_len(*ptr);
- else
+ else
numeric_locale_len = 0;
-
+
/* Get width, ignore height */
pg_wcssize((unsigned char *) *ptr, strlen(*ptr), encoding, &tmp, &height, &space);
tmp += numeric_locale_len;
for (i = 0; i < col_count; i++)
total_w += widths[i];
- /* At this point:
- * widths contains the max width of each column
- * heights contains the max height of a cell of each column
- * format_space contains maximum space required to store formatted string
- * so we prepare the formatting structures
+ /*
+ * At this point: widths contains the max width of each column heights
+ * contains the max height of a cell of each column format_space contains
+ * maximum space required to store formatted string so we prepare the
+ * formatting structures
*/
if (col_count > 0)
{
- int heights_total = 0;
+ int heights_total = 0;
struct lineptr *lineptr;
-
+
for (i = 0; i < col_count; i++)
heights_total += heights[i];
-
+
lineptr = lineptr_list = pg_local_calloc(heights_total, sizeof(*lineptr_list));
-
+
for (i = 0; i < col_count; i++)
{
col_lineptrs[i] = lineptr;
lineptr += heights[i];
-
+
format_buf[i] = pg_local_malloc(format_space[i]);
-
+
col_lineptrs[i]->ptr = format_buf[i];
}
}
if (title && !opt_tuples_only)
{
/* Get width & height */
- int height;
- pg_wcssize((unsigned char *)title, strlen(title), encoding, &tmp, &height, NULL);
+ int height;
+
+ pg_wcssize((unsigned char *) title, strlen(title), encoding, &tmp, &height, NULL);
if (tmp >= total_w)
fprintf(fout, "%s\n", title);
else
/* print headers */
if (!opt_tuples_only)
{
- int cols_todo;
- int line_count;
-
+ int cols_todo;
+ int line_count;
+
if (opt_border == 2)
_print_horizontal_line(col_count, widths, opt_border, fout);
for (i = 0; i < col_count; i++)
- pg_wcsformat((unsigned char *)headers[i], strlen(headers[i]), encoding, col_lineptrs[i], heights[i]);
-
+ pg_wcsformat((unsigned char *) headers[i], strlen(headers[i]), encoding, col_lineptrs[i], heights[i]);
+
cols_todo = col_count;
line_count = 0;
- memset(complete, 0, col_count*sizeof(int));
+ memset(complete, 0, col_count * sizeof(int));
while (cols_todo)
{
if (opt_border == 2)
unsigned int nbspace;
struct lineptr *this_line = col_lineptrs[i] + line_count;
+
if (!complete[i])
{
nbspace = widths[i] - this_line->width;
fprintf(fout, "%-*s%s%-*s",
nbspace / 2, "", this_line->ptr, (nbspace + 1) / 2, "");
- if (line_count == (heights[i]-1) || !(this_line+1)->ptr)
+ if (line_count == (heights[i] - 1) || !(this_line + 1)->ptr)
{
cols_todo--;
complete[i] = 1;
}
/* print cells */
- for (i = 0, ptr = cells; *ptr; i+=col_count, ptr+=col_count)
+ for (i = 0, ptr = cells; *ptr; i += col_count, ptr += col_count)
{
- int j;
- int cols_todo = col_count;
- int line_count; /* Number of lines output so far in row */
+ int j;
+ int cols_todo = col_count;
+ int line_count; /* Number of lines output so far in row */
if (cancel_pressed)
break;
for (j = 0; j < col_count; j++)
- pg_wcsformat((unsigned char*)ptr[j], strlen(ptr[j]), encoding, col_lineptrs[j], heights[j]);
-
+ pg_wcsformat((unsigned char *) ptr[j], strlen(ptr[j]), encoding, col_lineptrs[j], heights[j]);
+
line_count = 0;
- memset(complete, 0, col_count*sizeof(int));
+ memset(complete, 0, col_count * sizeof(int));
while (cols_todo)
{
/* beginning of line */
for (j = 0; j < col_count; j++)
{
struct lineptr *this_line = col_lineptrs[j] + line_count;
- bool finalspaces = (opt_border == 2 || j != col_count-1);
+ bool finalspaces = (opt_border == 2 || j != col_count - 1);
- if (complete[j]) /* Just print spaces... */
+ if (complete[j]) /* Just print spaces... */
{
if (finalspaces)
fprintf(fout, "%*s", widths[j], "");
/*
* Assumption: This code used only on strings
* without multibyte characters, otherwise
- * this_line->width < strlen(this_ptr) and we
- * get an overflow
+ * this_line->width < strlen(this_ptr) and we get
+ * an overflow
*/
- char *my_cell = format_numeric_locale((char *) this_line->ptr);
+ char *my_cell = format_numeric_locale((char *) this_line->ptr);
+
fprintf(fout, "%*s%s",
(int) (widths[i % col_count] - strlen(my_cell)), "",
my_cell);
}
else
fprintf(fout, "%-s%*s", this_line->ptr,
- finalspaces ? (widths[j] - this_line->width) : 0, "");
+ finalspaces ? (widths[j] - this_line->width) : 0, "");
/* If at the right height, done this col */
- if (line_count == heights[j]-1 || !this_line[1].ptr)
+ if (line_count == heights[j] - 1 || !this_line[1].ptr)
{
complete[j] = 1;
cols_todo--;
}
}
-
+
/* divider */
if ((j + 1) % col_count)
{
else if (line_count == 0)
fputs(" | ", fout);
else
- fprintf(fout, " %c ", complete[j+1] ? ' ' : ':');
+ fprintf(fout, " %c ", complete[j + 1] ? ' ' : ':');
}
}
if (opt_border == 2)
fprintf(fout, "%s\n", *ptr);
/*
- * for some reason MinGW (and MSVC) outputs an extra newline,
- * so this suppresses it
+ * for some reason MinGW (and MSVC) outputs an extra newline, so this
+ * suppresses it
*/
#ifndef WIN32
fputc('\n', fout);
free(format_space);
free(complete);
free(lineptr_list);
- for (i= 0; i < col_count; i++)
+ for (i = 0; i < col_count; i++)
free(format_buf[i]);
free(format_buf);
}
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
- int encoding = opt->encoding;
+ int encoding = opt->encoding;
unsigned int col_count = 0;
unsigned long record = opt->prior_records + 1;
const char *const * ptr;
dheight = 1,
hformatsize = 0,
dformatsize = 0;
- int tmp = 0;
+ int tmp = 0;
char *divider;
unsigned int cell_count = 0;
- struct lineptr *hlineptr, *dlineptr;
+ struct lineptr *hlineptr,
+ *dlineptr;
if (cancel_pressed)
return;
if (opt_border > 2)
opt_border = 2;
-
+
if (cells[0] == NULL && opt->start_table && opt->stop_table)
{
fprintf(fout, _("(No rows)\n"));
/* Find the maximum dimensions for the headers */
for (i = 0; i < col_count; i++)
{
- int height, fs;
+ int height,
+ fs;
+
pg_wcssize((unsigned char *) headers[i], strlen(headers[i]), encoding, &tmp, &height, &fs);
if (tmp > hwidth)
hwidth = tmp;
/* find longest data cell */
for (i = 0, ptr = cells; *ptr; ptr++, i++)
{
- int numeric_locale_len;
- int height, fs;
+ int numeric_locale_len;
+ int height,
+ fs;
if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
numeric_locale_len = additional_numeric_locale_len(*ptr);
- else
+ else
numeric_locale_len = 0;
pg_wcssize((unsigned char *) *ptr, strlen(*ptr), encoding, &tmp, &height, &fs);
if (fs > dformatsize)
dformatsize = fs;
}
-
- /* We now have all the information we need to setup the formatting structures */
+
+ /*
+	 * We now have all the information we need to set up the formatting
+	 * structures
+ */
dlineptr = pg_local_malloc(sizeof(*dlineptr) * dheight);
hlineptr = pg_local_malloc(sizeof(*hlineptr) * hheight);
-
+
dlineptr->ptr = pg_local_malloc(dformatsize);
hlineptr->ptr = pg_local_malloc(hformatsize);
/* print records */
for (i = 0, ptr = cells; *ptr; i++, ptr++)
{
- int line_count, dcomplete, hcomplete;
-
+ int line_count,
+ dcomplete,
+ hcomplete;
+
if (i % col_count == 0)
{
if (cancel_pressed)
}
/* Format the header */
- pg_wcsformat((unsigned char*)headers[i % col_count],
- strlen(headers[i % col_count]), encoding, hlineptr, hheight);
+ pg_wcsformat((unsigned char *) headers[i % col_count],
+ strlen(headers[i % col_count]), encoding, hlineptr, hheight);
/* Format the data */
- pg_wcsformat((unsigned char*)*ptr, strlen(*ptr), encoding, dlineptr, dheight);
-
+ pg_wcsformat((unsigned char *) *ptr, strlen(*ptr), encoding, dlineptr, dheight);
+
line_count = 0;
dcomplete = hcomplete = 0;
while (!dcomplete || !hcomplete)
{
fprintf(fout, "%-s%*s", hlineptr[line_count].ptr,
hwidth - hlineptr[line_count].width, "");
-
- if (line_count == (hheight-1) || !hlineptr[line_count+1].ptr)
+
+ if (line_count == (hheight - 1) || !hlineptr[line_count + 1].ptr)
hcomplete = 1;
}
else
fprintf(fout, "%*s", hwidth, "");
-
+
if (opt_border > 0)
- fprintf(fout, " %c ", (line_count==0)?'|':':');
+ fprintf(fout, " %c ", (line_count == 0) ? '|' : ':');
else
fputs(" ", fout);
if (!dcomplete)
{
- if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
- {
- char *my_cell = format_numeric_locale((char *) dlineptr[line_count].ptr);
- if (opt_border < 2)
- fprintf(fout, "%s\n", my_cell);
- else
- fprintf(fout, "%-s%*s |\n", my_cell,
+ if (opt_align[i % col_count] == 'r' && opt_numeric_locale)
+ {
+ char *my_cell = format_numeric_locale((char *) dlineptr[line_count].ptr);
+
+ if (opt_border < 2)
+ fprintf(fout, "%s\n", my_cell);
+ else
+ fprintf(fout, "%-s%*s |\n", my_cell,
(int) (dwidth - strlen(my_cell)), "");
- free(my_cell);
- }
- else
- {
- if (opt_border < 2)
- fprintf(fout, "%s\n", dlineptr[line_count].ptr);
- else
- fprintf(fout, "%-s%*s |\n", dlineptr[line_count].ptr,
+ free(my_cell);
+ }
+ else
+ {
+ if (opt_border < 2)
+ fprintf(fout, "%s\n", dlineptr[line_count].ptr);
+ else
+ fprintf(fout, "%-s%*s |\n", dlineptr[line_count].ptr,
dwidth - dlineptr[line_count].width, "");
- }
-
- if (line_count == dheight - 1 || !dlineptr[line_count+1].ptr)
- dcomplete = 1;
- }
- else
- {
- if (opt_border < 2)
- fputc('\n', fout);
- else
- fprintf(fout, "%*s |\n", dwidth, "");
- }
- line_count++;
- }
+ }
+
+ if (line_count == dheight - 1 || !dlineptr[line_count + 1].ptr)
+ dcomplete = 1;
+ }
+ else
+ {
+ if (opt_border < 2)
+ fputc('\n', fout);
+ else
+ fprintf(fout, "%*s |\n", dwidth, "");
+ }
+ line_count++;
+ }
}
if (opt->stop_table)
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
const char *opt_table_attr = opt->tableAttr;
unsigned int col_count = 0;
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
const char *opt_table_attr = opt->tableAttr;
unsigned int col_count = 0;
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
unsigned int col_count = 0;
unsigned int i;
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
unsigned int col_count = 0;
unsigned long record = opt->prior_records + 1;
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
unsigned int col_count = 0;
unsigned int i;
static void
print_troff_ms_vertical(const char *title, const char *const * headers,
- const char *const * cells, const char *const * footers,
+ const char *const * cells, const char *const * footers,
const char *opt_align, const printTableOpt *opt,
FILE *fout)
{
- bool opt_tuples_only = opt->tuples_only;
- bool opt_numeric_locale = opt->numericLocale;
+ bool opt_tuples_only = opt->tuples_only;
+ bool opt_numeric_locale = opt->numericLocale;
unsigned short int opt_border = opt->border;
unsigned int col_count = 0;
unsigned long record = opt->prior_records + 1;
)
{
const char *pagerprog;
- FILE *pagerpipe;
+ FILE *pagerpipe;
#ifdef TIOCGWINSZ
int result;
/*
* If printing was canceled midstream, warn about it.
*
- * Some pagers like less use Ctrl-C as part of their command
- * set. Even so, we abort our processing and warn the user
- * what we did. If the pager quit as a result of the
- * SIGINT, this message won't go anywhere ...
+ * Some pagers like less use Ctrl-C as part of their command set. Even
+ * so, we abort our processing and warn the user what we did. If the
+ * pager quit as a result of the SIGINT, this message won't go
+ * anywhere ...
*/
if (cancel_pressed)
fprintf(pagerpipe, _("Interrupted\n"));
headers = pg_local_calloc(nfields + 1, sizeof(*headers));
for (i = 0; i < nfields; i++)
- headers[i] = (char*) mbvalidate((unsigned char *) PQfname(result, i),
- opt->topt.encoding);
+ headers[i] = (char *) mbvalidate((unsigned char *) PQfname(result, i),
+ opt->topt.encoding);
/* set cells */
ncells = PQntuples(result) * nfields;
if (PQgetisnull(result, i / nfields, i % nfields))
cells[i] = opt->nullPrint ? opt->nullPrint : "";
else
- cells[i] = (char*)
- mbvalidate((unsigned char*) PQgetvalue(result, i / nfields, i % nfields),
+ cells[i] = (char *)
+ mbvalidate((unsigned char *) PQgetvalue(result, i / nfields, i % nfields),
opt->topt.encoding);
}
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.30 2006/08/29 22:25:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.31 2006/10/04 00:30:06 momjian Exp $
*/
#ifndef SETTINGS_H
#define SETTINGS_H
VariableSpace vars; /* "shell variable" repository */
/*
- * The remaining fields are set by assign hooks associated with
- * entries in "vars". They should not be set directly except by
- * those hook functions.
+ * The remaining fields are set by assign hooks associated with entries in
+ * "vars". They should not be set directly except by those hook
+ * functions.
*/
bool autocommit;
bool on_error_stop;
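As a loose illustration of the hook discipline described above: the cached boolean is written only from the variable's assign hook, never directly. The hook registration API is not shown here and the names are hypothetical; only ParseVariableBool() is taken from the variables.h declarations later in this patch.

#include "postgres_fe.h"
#include "variables.h"			/* ParseVariableBool() */

/* cached copy of a boolean shell variable; written only by its hook */
static bool autocommit_cache;

/* hypothetical assign hook: invoked whenever the variable is (re)assigned */
static void
autocommit_hook(const char *newval)
{
	autocommit_cache = (newval != NULL) && ParseVariableBool(newval);
}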
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.137 2006/08/29 22:25:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.138 2006/10/04 00:30:06 momjian Exp $
*/
#include "postgres_fe.h"
char *action_string;
bool no_readline;
bool no_psqlrc;
- bool single_txn;
+ bool single_txn;
};
static int parse_version(const char *versionString);
concp, wincp);
}
}
-
#endif
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/stringutils.c,v 1.44 2006/07/14 14:52:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/stringutils.c,v 1.45 2006/10/04 00:30:06 momjian Exp $
*/
#include "postgres_fe.h"
* quote - set of characters that can quote a token (NULL if none)
* escape - character that can quote quotes (0 if none)
* e_strings - if TRUE, treat E'...' syntax as a valid token
- * del_quotes - if TRUE, strip quotes from the returned token, else return
+ * del_quotes - if TRUE, strip quotes from the returned token, else return
* it exactly as found in the string
* encoding - the active character-set encoding
*
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.155 2006/09/22 21:39:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.156 2006/10/04 00:30:06 momjian Exp $
*/
/*----------------------------------------------------------------------
static char *previous_word(int point, int skip);
-static int find_open_parenthesis(int end);
+static int find_open_parenthesis(int end);
#if 0
static char *quote_file_name(char *text, int match_type, char *quote_pointer);
pg_strcasecmp(prev_wd, "RENAME") == 0))
COMPLETE_WITH_ATTR(prev2_wd, " UNION SELECT 'COLUMN'");
- /* If we have TABLE ALTER COLUMN|RENAME COLUMN, provide list of columns */
+ /*
+ * If we have TABLE ALTER COLUMN|RENAME COLUMN, provide list of
+ * columns
+ */
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
(pg_strcasecmp(prev2_wd, "ALTER") == 0 ||
pg_strcasecmp(prev2_wd, "RENAME") == 0) &&
pg_strcasecmp(prev_wd, "COLUMN") == 0)
COMPLETE_WITH_ATTR(prev3_wd, "");
-
+
/* ALTER TABLE xxx RENAME yyy */
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
pg_strcasecmp(prev2_wd, "RENAME") == 0 &&
{"CAST", "CONVERSION", "DATABASE", "INDEX", "LANGUAGE", "RULE", "SCHEMA",
"SEQUENCE", "TABLE", "TYPE", "VIEW", "COLUMN", "AGGREGATE", "FUNCTION",
"OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", "LARGE OBJECT",
- "TABLESPACE", "ROLE", NULL};
+ "TABLESPACE", "ROLE", NULL};
COMPLETE_WITH_LIST(list_COMMENT);
}
if (find_open_parenthesis(end))
COMPLETE_WITH_ATTR(prev_wd, "");
else
- COMPLETE_WITH_CONST("(");
+ COMPLETE_WITH_CONST("(");
}
else if (pg_strcasecmp(prev5_wd, "INDEX") == 0 &&
- pg_strcasecmp(prev3_wd, "ON") == 0 &&
- pg_strcasecmp(prev_wd, "(") == 0)
+ pg_strcasecmp(prev3_wd, "ON") == 0 &&
+ pg_strcasecmp(prev_wd, "(") == 0)
COMPLETE_WITH_ATTR(prev2_wd, "");
/* same if you put in USING */
else if (pg_strcasecmp(prev4_wd, "ON") == 0 &&
if (find_open_parenthesis(end))
{
static const char func_args_query[] = "select pg_catalog.oidvectortypes(proargtypes)||')' from pg_proc where proname='%s'";
- char *tmp_buf = malloc(strlen(func_args_query) + strlen(prev_wd));
+ char *tmp_buf = malloc(strlen(func_args_query) + strlen(prev_wd));
+
sprintf(tmp_buf, func_args_query, prev_wd);
COMPLETE_WITH_QUERY(tmp_buf);
free(tmp_buf);
{
static const char *const list_DROPCR[] =
{"CASCADE", "RESTRICT", NULL};
-
+
COMPLETE_WITH_LIST(list_DROPCR);
}
}
else if (pg_strcasecmp(prev4_wd, "DROP") == 0 &&
- pg_strcasecmp(prev3_wd, "FUNCTION") == 0 &&
- pg_strcasecmp(prev_wd, "(") == 0)
+ pg_strcasecmp(prev3_wd, "FUNCTION") == 0 &&
+ pg_strcasecmp(prev_wd, "(") == 0)
{
static const char func_args_query[] = "select pg_catalog.oidvectortypes(proargtypes)||')' from pg_proc where proname='%s'";
- char *tmp_buf = malloc(strlen(func_args_query) + strlen(prev2_wd));
+ char *tmp_buf = malloc(strlen(func_args_query) + strlen(prev2_wd));
+
sprintf(tmp_buf, func_args_query, prev2_wd);
COMPLETE_WITH_QUERY(tmp_buf);
free(tmp_buf);
{
static const char *const list_privileg[] =
{"SELECT", "INSERT", "UPDATE", "DELETE", "RULE", "REFERENCES",
- "TRIGGER", "CREATE", "CONNECT", "TEMPORARY", "EXECUTE", "USAGE",
- "ALL", NULL};
+ "TRIGGER", "CREATE", "CONNECT", "TEMPORARY", "EXECUTE", "USAGE",
+ "ALL", NULL};
COMPLETE_WITH_LIST(list_privileg);
}
/* Find the parenthesis after the last word */
-static int find_open_parenthesis(int end)
+static int
+find_open_parenthesis(int end)
{
- int i = end-1;
-
- while((rl_line_buffer[i]!=' ')&&(i>=0))
+ int i = end - 1;
+
+ while ((rl_line_buffer[i] != ' ') && (i >= 0))
{
- if (rl_line_buffer[i]=='(') return 1;
+ if (rl_line_buffer[i] == '(')
+ return 1;
i--;
}
- while((rl_line_buffer[i]==' ')&&(i>=0))
+ while ((rl_line_buffer[i] == ' ') && (i >= 0))
{
i--;
}
- if (rl_line_buffer[i]=='(')
+ if (rl_line_buffer[i] == '(')
{
- return 1;
+ return 1;
}
return 0;
*
* Copyright (c) 2000-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/variables.h,v 1.19 2006/08/29 15:19:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/variables.h,v 1.20 2006/10/04 00:30:06 momjian Exp $
*/
#ifndef VARIABLES_H
#define VARIABLES_H
VariableSpace CreateVariableSpace(void);
const char *GetVariable(VariableSpace space, const char *name);
-bool ParseVariableBool(const char *val);
+bool ParseVariableBool(const char *val);
int ParseVariableNum(const char *val,
int defaultval,
int faultval,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.23 2006/10/03 21:45:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.24 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool
yesno_prompt(const char *question)
{
- char prompt[256];
+ char prompt[256];
- /* translator: This is a question followed by the translated options for "yes" and "no". */
+ /*
+ * translator: This is a question followed by the translated options for
+ * "yes" and "no".
+ */
snprintf(prompt, sizeof(prompt), _("%s (%s/%s) "),
_(question), _(PG_YESLETTER), _(PG_NOLETTER));
for (;;)
{
- char *resp;
+ char *resp;
resp = simple_prompt(prompt, 1, true);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/createuser.c,v 1.33 2006/09/22 18:50:41 petere Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/createuser.c,v 1.34 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void help(const char *progname);
-enum trivalue {
+enum trivalue
+{
TRI_DEFAULT,
TRI_NO,
TRI_YES
char *conn_limit = NULL;
bool pwprompt = false;
char *newpassword = NULL;
- /* Tri-valued variables. */
- enum trivalue createdb = TRI_DEFAULT,
- superuser = TRI_DEFAULT,
- createrole = TRI_DEFAULT,
- inherit = TRI_DEFAULT,
- login = TRI_DEFAULT,
- encrypted = TRI_DEFAULT;
+
+ /* Tri-valued variables. */
+ enum trivalue createdb = TRI_DEFAULT,
+ superuser = TRI_DEFAULT,
+ createrole = TRI_DEFAULT,
+ inherit = TRI_DEFAULT,
+ login = TRI_DEFAULT,
+ encrypted = TRI_DEFAULT;
PQExpBufferData sql;
/*--------------------------------------------------------------------------
* gin.h
- * header file for postgres inverted index access method implementation.
+ * header file for postgres inverted index access method implementation.
*
- * Copyright (c) 2006, PostgreSQL Global Development Group
- * $PostgreSQL: pgsql/src/include/access/gin.h,v 1.7 2006/09/10 20:14:20 tgl Exp $
+ * Copyright (c) 2006, PostgreSQL Global Development Group
+ * $PostgreSQL: pgsql/src/include/access/gin.h,v 1.8 2006/10/04 00:30:06 momjian Exp $
*--------------------------------------------------------------------------
*/
/*
* amproc indexes for inverted indexes.
*/
-#define GIN_COMPARE_PROC 1
-#define GIN_EXTRACTVALUE_PROC 2
-#define GIN_EXTRACTQUERY_PROC 3
-#define GIN_CONSISTENT_PROC 4
-#define GINNProcs 4
+#define GIN_COMPARE_PROC 1
+#define GIN_EXTRACTVALUE_PROC 2
+#define GIN_EXTRACTQUERY_PROC 3
+#define GIN_CONSISTENT_PROC 4
+#define GINNProcs 4
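A sketch (not ginutil.c itself) of how these support-procedure numbers are typically resolved into callable FmgrInfos with the standard index_getprocinfo() lookup; the helper name and the single-column assumption are illustrative:

#include "postgres.h"
#include "access/genam.h"
#include "access/gin.h"
#include "fmgr.h"
#include "utils/rel.h"

static void
load_gin_support_procs(Relation index, FmgrInfo *cmp, FmgrInfo *extractValue,
					   FmgrInfo *extractQuery, FmgrInfo *consistent)
{
	/* attribute number 1: GIN indexes have a single key column */
	fmgr_info_copy(cmp,
				   index_getprocinfo(index, 1, GIN_COMPARE_PROC),
				   CurrentMemoryContext);
	fmgr_info_copy(extractValue,
				   index_getprocinfo(index, 1, GIN_EXTRACTVALUE_PROC),
				   CurrentMemoryContext);
	fmgr_info_copy(extractQuery,
				   index_getprocinfo(index, 1, GIN_EXTRACTQUERY_PROC),
				   CurrentMemoryContext);
	fmgr_info_copy(consistent,
				   index_getprocinfo(index, 1, GIN_CONSISTENT_PROC),
				   CurrentMemoryContext);
}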
typedef XLogRecPtr GinNSN;
/*
 * Page opaque data in an inverted index page.
*/
-typedef struct GinPageOpaqueData {
- uint16 flags;
- OffsetNumber maxoff; /* number entries on GIN_DATA page:
- number of heap ItemPointer on GIN_DATA|GIN_LEAF page
- and number of records on GIN_DATA & ~GIN_LEAF page
- */
- BlockNumber rightlink;
+typedef struct GinPageOpaqueData
+{
+ uint16 flags;
+	OffsetNumber maxoff;		/* number of entries on a GIN_DATA page:
+								 * number of heap ItemPointers on a
+								 * GIN_DATA|GIN_LEAF page and number of
+								 * records on a GIN_DATA & ~GIN_LEAF page */
+ BlockNumber rightlink;
} GinPageOpaqueData;
typedef GinPageOpaqueData *GinPageOpaque;
#define GIN_ROOT_BLKNO (0)
-typedef struct {
- BlockIdData child_blkno; /* use it instead of BlockNumber to
- save space on page */
- ItemPointerData key;
+typedef struct
+{
+ BlockIdData child_blkno; /* use it instead of BlockNumber to save space
+ * on page */
+ ItemPointerData key;
} PostingItem;
-#define PostingItemGetBlockNumber(pointer) \
+#define PostingItemGetBlockNumber(pointer) \
BlockIdGetBlockNumber(&(pointer)->child_blkno)
-#define PostingItemSetBlockNumber(pointer, blockNumber) \
+#define PostingItemSetBlockNumber(pointer, blockNumber) \
BlockIdSet(&((pointer)->child_blkno), (blockNumber))
/*
 * Page opaque data in an inverted index page.
*/
-#define GIN_DATA (1 << 0)
-#define GIN_LEAF (1 << 1)
-#define GIN_DELETED (1 << 2)
+#define GIN_DATA (1 << 0)
+#define GIN_LEAF (1 << 1)
+#define GIN_DELETED (1 << 2)
/*
- * Works on page
+ * Works on page
*/
#define GinPageGetOpaque(page) ( (GinPageOpaque) PageGetSpecialPointer(page) )
#define GinSetNPosting(itup,n) ItemPointerSetOffsetNumber(&(itup)->t_tid,(n))
#define GIN_TREE_POSTING ((OffsetNumber)0xffff)
#define GinIsPostingTree(itup) ( GinGetNPosting(itup)==GIN_TREE_POSTING )
-#define GinSetPostingTree(itup, blkno) ( GinSetNPosting((itup),GIN_TREE_POSTING ), ItemPointerSetBlockNumber(&(itup)->t_tid, blkno) )
-#define GinGetPostingTree(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)
+#define GinSetPostingTree(itup, blkno) ( GinSetNPosting((itup),GIN_TREE_POSTING ), ItemPointerSetBlockNumber(&(itup)->t_tid, blkno) )
+#define GinGetPostingTree(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)
-#define GinGetOrigSizePosting(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)
+#define GinGetOrigSizePosting(itup) GinItemPointerGetBlockNumber(&(itup)->t_tid)
#define GinSetOrigSizePosting(itup,n) ItemPointerSetBlockNumber(&(itup)->t_tid,(n))
#define GinGetPosting(itup) ( (ItemPointer)(( ((char*)(itup)) + SHORTALIGN(GinGetOrigSizePosting(itup)) )) )
#define GinMaxItemSize \
- ((BLCKSZ - SizeOfPageHeaderData - \
+ ((BLCKSZ - SizeOfPageHeaderData - \
MAXALIGN(sizeof(GinPageOpaqueData))) / 3 - sizeof(ItemIdData))
#define GinDataPageGetData(page) \
(PageGetContents(page)+MAXALIGN(sizeof(ItemPointerData)))
#define GinDataPageGetRightBound(page) ((ItemPointer)PageGetContents(page))
-#define GinSizeOfItem(page) ( (GinPageIsLeaf(page)) ? sizeof(ItemPointerData) : sizeof(PostingItem) )
-#define GinDataPageGetItem(page,i) ( GinDataPageGetData(page) + ((i)-1) * GinSizeOfItem(page) )
+#define GinSizeOfItem(page) ( (GinPageIsLeaf(page)) ? sizeof(ItemPointerData) : sizeof(PostingItem) )
+#define GinDataPageGetItem(page,i) ( GinDataPageGetData(page) + ((i)-1) * GinSizeOfItem(page) )
#define GinDataPageGetFreeSpace(page) \
( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GinPageOpaqueData)) - \
-#define GIN_UNLOCK BUFFER_LOCK_UNLOCK
-#define GIN_SHARE BUFFER_LOCK_SHARE
+#define GIN_UNLOCK BUFFER_LOCK_UNLOCK
+#define GIN_SHARE BUFFER_LOCK_SHARE
#define GIN_EXCLUSIVE BUFFER_LOCK_EXCLUSIVE
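Since GIN_SHARE/GIN_EXCLUSIVE are just the ordinary buffer-manager lock modes, inspecting a GIN page follows the usual ReadBuffer/LockBuffer pattern. A small illustrative helper (not part of the patch) that checks the opaque flags defined above:

#include "postgres.h"
#include "access/gin.h"
#include "storage/bufmgr.h"
#include "storage/bufpage.h"
#include "utils/rel.h"

/* Sketch: true if the given block is a leaf page of a posting tree. */
static bool
gin_page_is_data_leaf(Relation index, BlockNumber blkno)
{
	Buffer		buffer = ReadBuffer(index, blkno);
	Page		page;
	bool		result;

	LockBuffer(buffer, GIN_SHARE);
	page = BufferGetPage(buffer);
	result = (GinPageGetOpaque(page)->flags & (GIN_DATA | GIN_LEAF)) ==
		(GIN_DATA | GIN_LEAF);
	LockBuffer(buffer, GIN_UNLOCK);
	ReleaseBuffer(buffer);
	return result;
}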
-typedef struct GinState {
+typedef struct GinState
+{
FmgrInfo compareFn;
FmgrInfo extractValueFn;
FmgrInfo extractQueryFn;
#define XLOG_GIN_CREATE_PTREE 0x10
-typedef struct ginxlogCreatePostingTree {
- RelFileNode node;
- BlockNumber blkno;
- uint32 nitem;
+typedef struct ginxlogCreatePostingTree
+{
+ RelFileNode node;
+ BlockNumber blkno;
+ uint32 nitem;
/* follows list of heap's ItemPointer */
} ginxlogCreatePostingTree;
#define XLOG_GIN_INSERT 0x20
-typedef struct ginxlogInsert {
- RelFileNode node;
- BlockNumber blkno;
- BlockNumber updateBlkno;
- OffsetNumber offset;
- bool isDelete;
- bool isData;
- bool isLeaf;
- OffsetNumber nitem;
-
- /* follows: tuples or ItemPointerData or PostingItem or list of ItemPointerData*/
+typedef struct ginxlogInsert
+{
+ RelFileNode node;
+ BlockNumber blkno;
+ BlockNumber updateBlkno;
+ OffsetNumber offset;
+ bool isDelete;
+ bool isData;
+ bool isLeaf;
+ OffsetNumber nitem;
+
+ /*
+ * follows: tuples or ItemPointerData or PostingItem or list of
+ * ItemPointerData
+ */
} ginxlogInsert;
-#define XLOG_GIN_SPLIT 0x30
+#define XLOG_GIN_SPLIT 0x30
-typedef struct ginxlogSplit {
- RelFileNode node;
- BlockNumber lblkno;
- BlockNumber rootBlkno;
- BlockNumber rblkno;
- BlockNumber rrlink;
- OffsetNumber separator;
- OffsetNumber nitem;
+typedef struct ginxlogSplit
+{
+ RelFileNode node;
+ BlockNumber lblkno;
+ BlockNumber rootBlkno;
+ BlockNumber rblkno;
+ BlockNumber rrlink;
+ OffsetNumber separator;
+ OffsetNumber nitem;
- bool isData;
- bool isLeaf;
- bool isRootSplit;
+ bool isData;
+ bool isLeaf;
+ bool isRootSplit;
- BlockNumber leftChildBlkno;
- BlockNumber updateBlkno;
+ BlockNumber leftChildBlkno;
+ BlockNumber updateBlkno;
- ItemPointerData rightbound; /* used only in posting tree */
+ ItemPointerData rightbound; /* used only in posting tree */
/* follows: list of tuple or ItemPointerData or PostingItem */
} ginxlogSplit;
-#define XLOG_GIN_VACUUM_PAGE 0x40
+#define XLOG_GIN_VACUUM_PAGE 0x40
-typedef struct ginxlogVacuumPage {
- RelFileNode node;
- BlockNumber blkno;
- OffsetNumber nitem;
+typedef struct ginxlogVacuumPage
+{
+ RelFileNode node;
+ BlockNumber blkno;
+ OffsetNumber nitem;
/* follows content of page */
} ginxlogVacuumPage;
-#define XLOG_GIN_DELETE_PAGE 0x50
+#define XLOG_GIN_DELETE_PAGE 0x50
-typedef struct ginxlogDeletePage {
- RelFileNode node;
- BlockNumber blkno;
- BlockNumber parentBlkno;
- OffsetNumber parentOffset;
- BlockNumber leftBlkno;
- BlockNumber rightLink;
+typedef struct ginxlogDeletePage
+{
+ RelFileNode node;
+ BlockNumber blkno;
+ BlockNumber parentBlkno;
+ OffsetNumber parentOffset;
+ BlockNumber leftBlkno;
+ BlockNumber rightLink;
} ginxlogDeletePage;
/* ginutil.c */
extern Datum ginoptions(PG_FUNCTION_ARGS);
-extern void initGinState( GinState *state, Relation index );
+extern void initGinState(GinState *state, Relation index);
extern Buffer GinNewBuffer(Relation index);
extern void GinInitBuffer(Buffer b, uint32 f);
extern void GinInitPage(Page page, uint32 f, Size pageSize);
-extern int compareEntries(GinState *ginstate, Datum a, Datum b);
-extern Datum* extractEntriesS(GinState *ginstate, Datum value, uint32 *nentries);
-extern Datum* extractEntriesSU(GinState *ginstate, Datum value, uint32 *nentries);
-extern Page GinPageGetCopyPage( Page page );
+extern int compareEntries(GinState *ginstate, Datum a, Datum b);
+extern Datum *extractEntriesS(GinState *ginstate, Datum value, uint32 *nentries);
+extern Datum *extractEntriesSU(GinState *ginstate, Datum value, uint32 *nentries);
+extern Page GinPageGetCopyPage(Page page);
/* gininsert.c */
extern Datum ginbuild(PG_FUNCTION_ARGS);
/* ginbtree.c */
-typedef struct GinBtreeStack {
- BlockNumber blkno;
- Buffer buffer;
- OffsetNumber off;
- /* predictNumber contains prediction number of pages on current level */
- uint32 predictNumber;
+typedef struct GinBtreeStack
+{
+ BlockNumber blkno;
+ Buffer buffer;
+ OffsetNumber off;
+	/* predictNumber is the predicted number of pages on the current level */
+ uint32 predictNumber;
struct GinBtreeStack *parent;
} GinBtreeStack;
typedef struct GinBtreeData *GinBtree;
-typedef struct GinBtreeData {
+typedef struct GinBtreeData
+{
/* search methods */
- BlockNumber (*findChildPage)(GinBtree, GinBtreeStack *);
- bool (*isMoveRight)(GinBtree, Page);
- bool (*findItem)(GinBtree, GinBtreeStack *);
+ BlockNumber (*findChildPage) (GinBtree, GinBtreeStack *);
+ bool (*isMoveRight) (GinBtree, Page);
+ bool (*findItem) (GinBtree, GinBtreeStack *);
/* insert methods */
- OffsetNumber (*findChildPtr)(GinBtree, Page, BlockNumber, OffsetNumber);
- BlockNumber (*getLeftMostPage)(GinBtree, Page);
- bool (*isEnoughSpace)(GinBtree, Buffer, OffsetNumber);
- void (*placeToPage)(GinBtree, Buffer, OffsetNumber, XLogRecData**);
- Page (*splitPage)(GinBtree, Buffer, Buffer, OffsetNumber, XLogRecData**);
- void (*fillRoot)(GinBtree, Buffer, Buffer, Buffer);
+ OffsetNumber (*findChildPtr) (GinBtree, Page, BlockNumber, OffsetNumber);
+ BlockNumber (*getLeftMostPage) (GinBtree, Page);
+ bool (*isEnoughSpace) (GinBtree, Buffer, OffsetNumber);
+ void (*placeToPage) (GinBtree, Buffer, OffsetNumber, XLogRecData **);
+ Page (*splitPage) (GinBtree, Buffer, Buffer, OffsetNumber, XLogRecData **);
+ void (*fillRoot) (GinBtree, Buffer, Buffer, Buffer);
- bool searchMode;
+ bool searchMode;
- Relation index;
- GinState *ginstate;
- bool fullScan;
- bool isBuild;
+ Relation index;
+ GinState *ginstate;
+ bool fullScan;
+ bool isBuild;
- BlockNumber rightblkno;
+ BlockNumber rightblkno;
/* Entry options */
- Datum entryValue;
- IndexTuple entry;
- bool isDelete;
+ Datum entryValue;
+ IndexTuple entry;
+ bool isDelete;
/* Data (posting tree) option */
- ItemPointerData *items;
- uint32 nitem;
- uint32 curitem;
+ ItemPointerData *items;
+ uint32 nitem;
+ uint32 curitem;
- PostingItem pitem;
+ PostingItem pitem;
} GinBtreeData;
-extern GinBtreeStack* ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno);
-extern GinBtreeStack* ginFindLeafPage(GinBtree btree, GinBtreeStack *stack );
-extern void freeGinBtreeStack( GinBtreeStack *stack );
+extern GinBtreeStack *ginPrepareFindLeafPage(GinBtree btree, BlockNumber blkno);
+extern GinBtreeStack *ginFindLeafPage(GinBtree btree, GinBtreeStack *stack);
+extern void freeGinBtreeStack(GinBtreeStack *stack);
extern void ginInsertValue(GinBtree btree, GinBtreeStack *stack);
-extern void findParents( GinBtree btree, GinBtreeStack *stack, BlockNumber rootBlkno);
+extern void findParents(GinBtree btree, GinBtreeStack *stack, BlockNumber rootBlkno);
/* ginentrypage.c */
extern IndexTuple GinFormTuple(GinState *ginstate, Datum key, ItemPointerData *ipd, uint32 nipd);
extern Datum ginGetHighKey(GinState *ginstate, Page page);
-extern void prepareEntryScan( GinBtree btree, Relation index, Datum value, GinState *ginstate);
+extern void prepareEntryScan(GinBtree btree, Relation index, Datum value, GinState *ginstate);
extern void entryFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf);
extern IndexTuple ginPageGetLinkItup(Buffer buf);
/* gindatapage.c */
-extern int compareItemPointers( ItemPointer a, ItemPointer b );
-extern void MergeItemPointers(
- ItemPointerData *dst,
- ItemPointerData *a, uint32 na,
- ItemPointerData *b, uint32 nb
- );
-
-extern void GinDataPageAddItem( Page page, void *data, OffsetNumber offset );
+extern int compareItemPointers(ItemPointer a, ItemPointer b);
+extern void
+MergeItemPointers(
+ ItemPointerData *dst,
+ ItemPointerData *a, uint32 na,
+ ItemPointerData *b, uint32 nb
+);
+
+extern void GinDataPageAddItem(Page page, void *data, OffsetNumber offset);
extern void PageDeletePostingItem(Page page, OffsetNumber offset);
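A usage sketch for the MergeItemPointers() prototype above: the destination is sized for the worst case (na + nb items) and the routine keeps the combined posting list ordered in the same sense as compareItemPointers(); the wrapper name is illustrative:

#include "postgres.h"
#include "access/gin.h"

static ItemPointerData *
merge_posting_lists(ItemPointerData *a, uint32 na,
					ItemPointerData *b, uint32 nb)
{
	ItemPointerData *dst = (ItemPointerData *)
		palloc(sizeof(ItemPointerData) * (na + nb));

	MergeItemPointers(dst, a, na, b, nb);
	return dst;
}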
-typedef struct {
- GinBtreeData btree;
- GinBtreeStack *stack;
+typedef struct
+{
+ GinBtreeData btree;
+ GinBtreeStack *stack;
} GinPostingTreeScan;
-extern GinPostingTreeScan* prepareScanPostingTree( Relation index,
- BlockNumber rootBlkno, bool searchMode);
-extern void insertItemPointer(GinPostingTreeScan *gdi,
- ItemPointerData *items, uint32 nitem);
-extern Buffer scanBeginPostingTree( GinPostingTreeScan *gdi );
+extern GinPostingTreeScan *prepareScanPostingTree(Relation index,
+ BlockNumber rootBlkno, bool searchMode);
+extern void insertItemPointer(GinPostingTreeScan *gdi,
+ ItemPointerData *items, uint32 nitem);
+extern Buffer scanBeginPostingTree(GinPostingTreeScan *gdi);
extern void dataFillRoot(GinBtree btree, Buffer root, Buffer lbuf, Buffer rbuf);
-extern void prepareDataScan( GinBtree btree, Relation index);
+extern void prepareDataScan(GinBtree btree, Relation index);
+
/* ginscan.c */
typedef struct GinScanEntryData *GinScanEntry;
-typedef struct GinScanEntryData {
+typedef struct GinScanEntryData
+{
/* link to the equals entry in current scan key */
- GinScanEntry master;
+ GinScanEntry master;
- /* link to values reported to consistentFn,
- points to GinScanKey->entryRes[i]*/
- bool *pval;
+ /*
+ * link to values reported to consistentFn, points to
+ * GinScanKey->entryRes[i]
+ */
+ bool *pval;
- /* entry, got from extractQueryFn */
- Datum entry;
+ /* entry, got from extractQueryFn */
+ Datum entry;
/* current ItemPointer to heap, its offset in buffer and buffer */
- ItemPointerData curItem;
- OffsetNumber offset;
- Buffer buffer;
+ ItemPointerData curItem;
+ OffsetNumber offset;
+ Buffer buffer;
	/* in case of posting list */
- ItemPointerData *list;
- uint32 nlist;
+ ItemPointerData *list;
+ uint32 nlist;
- bool isFinished;
- bool reduceResult;
- uint32 predictNumberResult;
+ bool isFinished;
+ bool reduceResult;
+ uint32 predictNumberResult;
} GinScanEntryData;
-typedef struct GinScanKeyData {
- /* Number of entries in query (got by extractQueryFn) */
- uint32 nentries;
+typedef struct GinScanKeyData
+{
+ /* Number of entries in query (got by extractQueryFn) */
+ uint32 nentries;
/* array of ItemPointer result, reported to consistentFn */
- bool *entryRes;
+ bool *entryRes;
- /* array of scans per entry */
- GinScanEntry scanEntry;
+ /* array of scans per entry */
+ GinScanEntry scanEntry;
/* for calling consistentFn(GinScanKey->entryRes, strategy, query) */
- StrategyNumber strategy;
- Datum query;
+ StrategyNumber strategy;
+ Datum query;
- ItemPointerData curItem;
- bool firstCall;
- bool isFinished;
+ ItemPointerData curItem;
+ bool firstCall;
+ bool isFinished;
} GinScanKeyData;
-typedef GinScanKeyData *GinScanKey;
+typedef GinScanKeyData *GinScanKey;
-typedef struct GinScanOpaqueData {
- MemoryContext tempCtx;
- GinState ginstate;
+typedef struct GinScanOpaqueData
+{
+ MemoryContext tempCtx;
+ GinState ginstate;
- GinScanKey keys;
- uint32 nkeys;
+ GinScanKey keys;
+ uint32 nkeys;
- GinScanKey markPos;
+ GinScanKey markPos;
} GinScanOpaqueData;
typedef GinScanOpaqueData *GinScanOpaque;
extern void newScanKey(IndexScanDesc scan);
/* ginget.c */
-extern DLLIMPORT int GinFuzzySearchLimit;
+extern DLLIMPORT int GinFuzzySearchLimit;
-#define ItemPointerSetMax(p) ItemPointerSet( (p), (BlockNumber)0xffffffff, (OffsetNumber)0xffff )
-#define ItemPointerIsMax(p) ( ItemPointerGetBlockNumber(p) == (BlockNumber)0xffffffff && ItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff )
-#define ItemPointerSetMin(p) ItemPointerSet( (p), (BlockNumber)0, (OffsetNumber)0)
-#define ItemPointerIsMin(p) ( ItemPointerGetBlockNumber(p) == (BlockNumber)0 && ItemPointerGetOffsetNumber(p) == (OffsetNumber)0 )
+#define ItemPointerSetMax(p) ItemPointerSet( (p), (BlockNumber)0xffffffff, (OffsetNumber)0xffff )
+#define ItemPointerIsMax(p) ( ItemPointerGetBlockNumber(p) == (BlockNumber)0xffffffff && ItemPointerGetOffsetNumber(p) == (OffsetNumber)0xffff )
+#define ItemPointerSetMin(p) ItemPointerSet( (p), (BlockNumber)0, (OffsetNumber)0)
+#define ItemPointerIsMin(p) ( ItemPointerGetBlockNumber(p) == (BlockNumber)0 && ItemPointerGetOffsetNumber(p) == (OffsetNumber)0 )
extern Datum gingetmulti(PG_FUNCTION_ARGS);
extern Datum gingettuple(PG_FUNCTION_ARGS);
extern Datum ginarrayconsistent(PG_FUNCTION_ARGS);
/* ginbulk.c */
-typedef struct EntryAccumulator {
- Datum value;
- uint32 length;
- uint32 number;
+typedef struct EntryAccumulator
+{
+ Datum value;
+ uint32 length;
+ uint32 number;
ItemPointerData *list;
bool shouldSort;
- struct EntryAccumulator *left;
- struct EntryAccumulator *right;
+ struct EntryAccumulator *left;
+ struct EntryAccumulator *right;
} EntryAccumulator;
-typedef struct {
- GinState *ginstate;
- EntryAccumulator *entries;
+typedef struct
+{
+ GinState *ginstate;
+ EntryAccumulator *entries;
uint32 maxdepth;
- EntryAccumulator **stack;
- uint32 stackpos;
- uint32 allocatedMemory;
+ EntryAccumulator **stack;
+ uint32 stackpos;
+ uint32 allocatedMemory;
- uint32 length;
- EntryAccumulator *entryallocator;
+ uint32 length;
+ EntryAccumulator *entryallocator;
} BuildAccumulator;
extern void ginInitBA(BuildAccumulator *accum);
-extern void ginInsertRecordBA( BuildAccumulator *accum,
- ItemPointer heapptr, Datum *entries, uint32 nentry );
-extern ItemPointerData* ginGetEntry(BuildAccumulator *accum, Datum *entry, uint32 *n);
+extern void ginInsertRecordBA(BuildAccumulator *accum,
+ ItemPointer heapptr, Datum *entries, uint32 nentry);
+extern ItemPointerData *ginGetEntry(BuildAccumulator *accum, Datum *entry, uint32 *n);
#endif
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.55 2006/09/10 00:29:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.56 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* PickSplit should check spl_(r|l)datum_exists. If it is 'true',
* that corresponding spl_(r|l)datum already defined and
* PickSplit should use that value. PickSplit should always set
- * spl_(r|l)datum_exists to false: GiST will check value to
+ * spl_(r|l)datum_exists to false: GiST will check value to
 * control supporting this feature by PickSplit...
*/
typedef struct GIST_SPLITVEC
OffsetNumber *spl_left; /* array of entries that go left */
int spl_nleft; /* size of this array */
Datum spl_ldatum; /* Union of keys in spl_left */
- bool spl_ldatum_exists; /* true, if spl_ldatum already exists. */
+ bool spl_ldatum_exists; /* true, if spl_ldatum already exists. */
OffsetNumber *spl_right; /* array of entries that go right */
int spl_nright; /* size of the array */
Datum spl_rdatum; /* Union of keys in spl_right */
- bool spl_rdatum_exists; /* true, if spl_rdatum already exists. */
+ bool spl_rdatum_exists; /* true, if spl_rdatum already exists. */
} GIST_SPLITVEC;
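A sketch of the handshake the comment above describes, showing only the spl_*datum_exists handling a PickSplit support function is expected to perform; the actual partitioning of the entry vector and the computation of the union keys are elided:

#include "postgres.h"
#include "access/gist.h"

static void
picksplit_handle_existing_datums(GIST_SPLITVEC *v)
{
	if (v->spl_ldatum_exists)
	{
		/* the caller supplied a pre-existing left union key: fold it in */
		/* ... merge v->spl_ldatum into the left-side union being built ... */
	}
	if (v->spl_rdatum_exists)
	{
		/* likewise for the right side */
		/* ... merge v->spl_rdatum into the right-side union being built ... */
	}

	/* always reset the flags so GiST knows this PickSplit supports them */
	v->spl_ldatum_exists = false;
	v->spl_rdatum_exists = false;
}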
/*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/gist_private.h,v 1.23 2006/08/07 16:57:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/gist_private.h,v 1.24 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
gistxlogPage block;
IndexTupleData *list;
int lenlist;
- IndexTuple itup; /* union key for page */
+ IndexTuple itup; /* union key for page */
Page page; /* to operate */
Buffer buffer; /* to write after all proceed */
struct GISTInsertStack *next;
} GISTInsertStack;
-typedef struct GistSplitVector {
- GIST_SPLITVEC splitVector; /* to/from PickSplit method */
+typedef struct GistSplitVector
+{
+ GIST_SPLITVEC splitVector; /* to/from PickSplit method */
- Datum spl_lattr[INDEX_MAX_KEYS]; /* Union of subkeys in spl_left */
- bool spl_lisnull[INDEX_MAX_KEYS];
- bool spl_leftvalid;
+ Datum spl_lattr[INDEX_MAX_KEYS]; /* Union of subkeys in
+ * spl_left */
+ bool spl_lisnull[INDEX_MAX_KEYS];
+ bool spl_leftvalid;
- Datum spl_rattr[INDEX_MAX_KEYS]; /* Union of subkeys in spl_right */
- bool spl_risnull[INDEX_MAX_KEYS];
- bool spl_rightvalid;
+ Datum spl_rattr[INDEX_MAX_KEYS]; /* Union of subkeys in
+ * spl_right */
+ bool spl_risnull[INDEX_MAX_KEYS];
+ bool spl_rightvalid;
- bool *spl_equiv; /* equivalent tuples which can be freely
- * distributed between left and right pages */
+ bool *spl_equiv; /* equivalent tuples which can be freely
+ * distributed between left and right pages */
} GistSplitVector;
#define XLogRecPtrIsInvalid( r ) ( (r).xlogid == 0 && (r).xrecoff == 0 )
extern IndexTuple gist_form_invalid_tuple(BlockNumber blkno);
extern XLogRecData *formUpdateRdata(RelFileNode node, Buffer buffer,
- OffsetNumber *todelete, int ntodelete,
+ OffsetNumber *todelete, int ntodelete,
IndexTuple *itup, int ituplen, ItemPointer key);
extern XLogRecData *formSplitRdata(RelFileNode node,
/* gistutil.c */
#define GiSTPageSize \
- ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GISTPageOpaqueData)) )
+ ( BLCKSZ - SizeOfPageHeaderData - MAXALIGN(sizeof(GISTPageOpaqueData)) )
#define GIST_MIN_FILLFACTOR 10
#define GIST_DEFAULT_FILLFACTOR 90
extern IndexTuple *gistjoinvector(
IndexTuple *itvec, int *len,
IndexTuple *additvec, int addlen);
-extern IndexTupleData* gistfillitupvec(IndexTuple *vec, int veclen, int *memlen);
+extern IndexTupleData *gistfillitupvec(IndexTuple *vec, int veclen, int *memlen);
extern IndexTuple gistunion(Relation r, IndexTuple *itvec,
int len, GISTSTATE *giststate);
bool l, bool isNull);
extern float gistpenalty(GISTSTATE *giststate, int attno,
- GISTENTRY *key1, bool isNull1,
- GISTENTRY *key2, bool isNull2);
+ GISTENTRY *key1, bool isNull1,
+ GISTENTRY *key2, bool isNull2);
extern bool gistMakeUnionItVec(GISTSTATE *giststate, IndexTuple *itvec, int len, int startkey,
- Datum *attr, bool *isnull );
+ Datum *attr, bool *isnull);
extern bool gistKeyIsEQ(GISTSTATE *giststate, int attno, Datum a, Datum b);
extern void gistDeCompressAtt(GISTSTATE *giststate, Relation r, IndexTuple tuple, Page p,
- OffsetNumber o, GISTENTRY *attdata, bool *isnull);
+ OffsetNumber o, GISTENTRY *attdata, bool *isnull);
-extern void gistMakeUnionKey( GISTSTATE *giststate, int attno,
- GISTENTRY *entry1, bool isnull1,
- GISTENTRY *entry2, bool isnull2,
- Datum *dst, bool *dstisnull );
+extern void gistMakeUnionKey(GISTSTATE *giststate, int attno,
+ GISTENTRY *entry1, bool isnull1,
+ GISTENTRY *entry2, bool isnull2,
+ Datum *dst, bool *dstisnull);
/* gistvacuum.c */
extern Datum gistbulkdelete(PG_FUNCTION_ARGS);
extern Datum gistvacuumcleanup(PG_FUNCTION_ARGS);
/* gistsplit.c */
-extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup,
- int len, GISTSTATE *giststate,
- GistSplitVector *v, GistEntryVector *entryvec,
- int attno);
+extern void gistSplitByKey(Relation r, Page page, IndexTuple *itup,
+ int len, GISTSTATE *giststate,
+ GistSplitVector *v, GistEntryVector *entryvec,
+ int attno);
#endif /* GIST_PRIVATE_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.115 2006/08/18 16:09:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.116 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum *values, char *nulls);
extern void heap_freetuple(HeapTuple htup);
extern MinimalTuple heap_form_minimal_tuple(TupleDesc tupleDescriptor,
- Datum *values, bool *isnull);
+ Datum *values, bool *isnull);
extern void heap_free_minimal_tuple(MinimalTuple mtup);
extern MinimalTuple heap_copy_minimal_tuple(MinimalTuple mtup);
extern HeapTuple heap_tuple_from_minimal_tuple(MinimalTuple mtup);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/hio.h,v 1.32 2006/07/13 17:47:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/hio.h,v 1.33 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void RelationPutHeapTuple(Relation relation, Buffer buffer,
HeapTuple tuple);
extern Buffer RelationGetBufferForTuple(Relation relation, Size len,
- Buffer otherBuffer, bool use_fsm);
+ Buffer otherBuffer, bool use_fsm);
#endif /* HIO_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.85 2006/07/13 17:47:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.86 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* MinimalTuple is an alternate representation that is used for transient
* tuples inside the executor, in places where transaction status information
* is not required, the tuple rowtype is known, and shaving off a few bytes
- * is worthwhile because we need to store many tuples. The representation
+ * is worthwhile because we need to store many tuples. The representation
* is chosen so that tuple access routines can work with either full or
* minimal tuples via a HeapTupleData pointer structure. The access routines
* see no difference, except that they must not access the transaction status
* and thereby prevent accidental use of the nonexistent fields.
*
* MinimalTupleData contains a length word, some padding, and fields matching
- * HeapTupleHeaderData beginning with t_natts. The padding is chosen so that
+ * HeapTupleHeaderData beginning with t_natts. The padding is chosen so that
* offsetof(t_natts) is the same modulo MAXIMUM_ALIGNOF in both structs.
* This makes data alignment rules equivalent in both cases.
*
* limited contexts where the code knows that case #1 will never apply.)
*
* * Separately allocated minimal tuple: t_data points MINIMAL_TUPLE_OFFSET
- * bytes before the start of a MinimalTuple. As with the previous case,
+ * bytes before the start of a MinimalTuple. As with the previous case,
* this can't be told apart from case #1 by inspection; code setting up
* or destroying this representation has to know what it's doing.
*
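For illustration, code that needs to treat such a separately allocated minimal tuple as a regular HeapTupleData sets up the pointer arithmetic roughly as in this sketch (the function itself is hypothetical; the offset math is the point):

static void
init_heaptuple_view(MinimalTuple mtup, HeapTupleData *tuple)
{
	/* total length as seen through the HeapTupleData view */
	tuple->t_len = mtup->t_len + MINIMAL_TUPLE_OFFSET;
	ItemPointerSetInvalid(&tuple->t_self);
	tuple->t_tableOid = InvalidOid;
	/* t_data points MINIMAL_TUPLE_OFFSET bytes before the MinimalTuple */
	tuple->t_data = (HeapTupleHeader) ((char *) mtup - MINIMAL_TUPLE_OFFSET);
}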
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.104 2006/08/24 01:18:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.105 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* and status. If the page is deleted, we replace the level with the
* next-transaction-ID value indicating when it is safe to reclaim the page.
*
- * We also store a "vacuum cycle ID". When a page is split while VACUUM is
+ * We also store a "vacuum cycle ID". When a page is split while VACUUM is
* processing the index, a nonzero value associated with the VACUUM run is
- * stored into both halves of the split page. (If VACUUM is not running,
- * both pages receive zero cycleids.) This allows VACUUM to detect whether
+ * stored into both halves of the split page. (If VACUUM is not running,
+ * both pages receive zero cycleids.) This allows VACUUM to detect whether
* a page was split since it started, with a small probability of false match
* if the page was last split some exact multiple of 65536 VACUUMs ago.
* Also, during a split, the BTP_SPLIT_END flag is cleared in the left
#define BTP_META (1 << 3) /* meta-page */
#define BTP_HALF_DEAD (1 << 4) /* empty, but still in tree */
#define BTP_SPLIT_END (1 << 5) /* rightmost page of split group */
-#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DELETEd tuples */
+#define BTP_HAS_GARBAGE (1 << 6) /* page has LP_DELETEd tuples */
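For illustration, the 65536 figure above comes from the cycle id being a 16-bit counter; a VACUUM-side "was this page split under me?" test looks roughly like the following sketch (rel and opaque are assumed to be in scope, and btpo_cycleid is assumed to be the opaque-data field that stores the cycle id):

BTCycleId	cycleid = _bt_vacuum_cycleid(rel);	/* zero when no VACUUM is active */

if (cycleid != 0 && opaque->btpo_cycleid == cycleid)
{
	/*
	 * The page carries this VACUUM's cycle id, so it was almost certainly
	 * split after the scan began; a false match requires the last split to
	 * have happened an exact multiple of 65536 cycles ago.
	 */
}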
/*
( (i1).ip_blkid.bi_hi == (i2).ip_blkid.bi_hi && \
(i1).ip_blkid.bi_lo == (i2).ip_blkid.bi_lo && \
(i1).ip_posid == (i2).ip_posid )
-#define BTEntrySame(i1, i2) \
+#define BTEntrySame(i1, i2) \
BTTidSame((i1)->t_tid, (i2)->t_tid)
#define XLOG_BTREE_SPLIT_R_ROOT 0x60 /* as above, new item on right */
#define XLOG_BTREE_DELETE 0x70 /* delete leaf index tuple */
#define XLOG_BTREE_DELETE_PAGE 0x80 /* delete an entire page */
-#define XLOG_BTREE_DELETE_PAGE_META 0x90 /* same, plus update metapage */
+#define XLOG_BTREE_DELETE_PAGE_META 0x90 /* same, plus update metapage */
#define XLOG_BTREE_NEWROOT 0xA0 /* new root page */
/*
* BTScanOpaqueData is the btree-private state needed for an indexscan.
* This consists of preprocessed scan keys (see _bt_preprocess_keys() for
* details of the preprocessing), information about the current location
- * of the scan, and information about the marked location, if any. (We use
+ * of the scan, and information about the marked location, if any. (We use
* BTScanPosData to represent the data needed for each of current and marked
- * locations.) In addition we can remember some known-killed index entries
+ * locations.) In addition we can remember some known-killed index entries
* that must be marked before we can move off the current page.
*
* Index scans work a page at a time: we pin and read-lock the page, identify
* all the matching items on the page and save them in BTScanPosData, then
* release the read-lock while returning the items to the caller for
- * processing. This approach minimizes lock/unlock traffic. Note that we
+ * processing. This approach minimizes lock/unlock traffic. Note that we
* keep the pin on the index page until the caller is done with all the items
- * (this is needed for VACUUM synchronization, see nbtree/README). When we
+ * (this is needed for VACUUM synchronization, see nbtree/README). When we
* are ready to step to the next page, if the caller has told us any of the
* items were killed, we re-lock the page to mark them killed, then unlock.
* Finally we drop the pin and step to the next page in the appropriate
int lastItem; /* last valid index in items[] */
int itemIndex; /* current index in items[] */
- BTScanPosItem items[MaxIndexTuplesPerPage]; /* MUST BE LAST */
+ BTScanPosItem items[MaxIndexTuplesPerPage]; /* MUST BE LAST */
} BTScanPosData;
typedef BTScanPosData *BTScanPos;
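For illustration, the pin/lock choreography described above looks roughly like this for each page (a sketch; the helper names follow the nbtree code, while rel, blkno, buf, scan and so stand for the relation, current block, buffer, scan descriptor and scan opaque):

buf = _bt_getbuf(rel, blkno, BT_READ);		/* pin + read-lock the page */
/* save every matching item on the page into so->currPos */
LockBuffer(buf, BUFFER_LOCK_UNLOCK);		/* drop the lock, keep the pin */

/* the caller now consumes the saved items with no lock held */

if (so->numKilled > 0)
	_bt_killitems(scan, false);				/* re-lock, mark killed items, unlock */
ReleaseBuffer(buf);							/* drop the pin, step to the next page */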
int numKilled; /* number of currently stored items */
/*
- * If the marked position is on the same page as current position,
- * we don't use markPos, but just keep the marked itemIndex in
- * markItemIndex (all the rest of currPos is valid for the mark position).
- * Hence, to determine if there is a mark, first look at markItemIndex,
- * then at markPos.
+ * If the marked position is on the same page as current position, we
+ * don't use markPos, but just keep the marked itemIndex in markItemIndex
+ * (all the rest of currPos is valid for the mark position). Hence, to
+ * determine if there is a mark, first look at markItemIndex, then at
+ * markPos.
*/
int markItemIndex; /* itemIndex, or -1 if not valid */
/*
* We use these private sk_flags bits in preprocessed scan keys
*/
-#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
-#define SK_BT_REQBKWD 0x00020000 /* required to continue backward scan */
+#define SK_BT_REQFWD 0x00010000 /* required to continue forward scan */
+#define SK_BT_REQBKWD 0x00020000 /* required to continue backward scan */
/*
extern void _bt_freestack(BTStack stack);
extern void _bt_preprocess_keys(IndexScanDesc scan);
extern bool _bt_checkkeys(IndexScanDesc scan,
- Page page, OffsetNumber offnum,
- ScanDirection dir, bool *continuescan);
+ Page page, OffsetNumber offnum,
+ ScanDirection dir, bool *continuescan);
extern void _bt_killitems(IndexScanDesc scan, bool haveLock);
extern BTCycleId _bt_vacuum_cycleid(Relation rel);
extern BTCycleId _bt_start_vacuum(Relation rel);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/reloptions.h,v 1.1 2006/07/03 22:45:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/reloptions.h,v 1.2 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "nodes/pg_list.h"
extern Datum transformRelOptions(Datum oldOptions, List *defList,
- bool ignoreOids, bool isReset);
+ bool ignoreOids, bool isReset);
extern void parseRelOptions(Datum options, int numkeywords,
- const char * const *keywords,
- char **values, bool validate);
+ const char *const * keywords,
+ char **values, bool validate);
extern bytea *default_reloptions(Datum reloptions, bool validate,
- int minFillfactor, int defaultFillfactor);
+ int minFillfactor, int defaultFillfactor);
extern bytea *heap_reloptions(char relkind, Datum reloptions, bool validate);
extern bytea *index_reloptions(RegProcedure amoptions, Datum reloptions,
- bool validate);
+ bool validate);
#endif /* RELOPTIONS_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.49 2006/07/31 20:09:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/relscan.h,v 1.50 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int rs_nkeys; /* number of scan keys */
ScanKey rs_key; /* array of scan key descriptors */
BlockNumber rs_nblocks; /* number of blocks to scan */
- bool rs_pageatatime; /* verify visibility page-at-a-time? */
+ bool rs_pageatatime; /* verify visibility page-at-a-time? */
/* scan current state */
bool rs_inited; /* false = scan not init'd yet */
HeapTupleData rs_ctup; /* current tuple in scan, if any */
- BlockNumber rs_cblock; /* current block # in scan, if any */
+ BlockNumber rs_cblock; /* current block # in scan, if any */
Buffer rs_cbuf; /* current buffer in scan, if any */
/* NB: if rs_cbuf is not InvalidBuffer, we hold a pin on that buffer */
ItemPointerData rs_mctid; /* marked scan position, if any */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.32 2006/03/05 15:58:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.33 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* must be sorted according to the leading column number.
*
* The subsidiary ScanKey array appears in logical column order of the row
- * comparison, which may be different from index column order. The array
+ * comparison, which may be different from index column order. The array
* elements are like a normal ScanKey array except that:
* sk_flags must include SK_ROW_MEMBER, plus SK_ROW_END in the last
* element (needed since row header does not include a count)
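For illustration, the layout described here pairs a header ScanKey with the subsidiary member array it points at, roughly as in this sketch (the SK_ROW_* flags are the ones defined in this header, the header key is assumed to carry SK_ROW_HEADER, and real code would fill in the remaining fields as well):

ScanKeyData header;
ScanKeyData members[2];					/* subsidiary array, logical column order */

members[0].sk_flags = SK_ROW_MEMBER;
members[1].sk_flags = SK_ROW_MEMBER | SK_ROW_END;	/* last member is marked */

header.sk_flags = SK_ROW_HEADER;
header.sk_argument = PointerGetDatum(members);		/* header points at the array */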
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.18 2006/03/05 15:58:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.19 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
LWLockId ctllock, const char *subdir);
extern int SimpleLruZeroPage(SlruCtl ctl, int pageno);
extern int SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid);
-extern int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno,
- TransactionId xid);
+extern int SimpleLruReadPage_ReadOnly(SlruCtl ctl, int pageno,
+ TransactionId xid);
extern void SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata);
extern void SimpleLruFlush(SlruCtl ctl, bool checkpoint);
extern void SimpleLruTruncate(SlruCtl ctl, int cutoffPage);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/tupdesc.h,v 1.50 2006/06/16 18:42:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/tupdesc.h,v 1.51 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if ((tupdesc)->tdrefcount >= 0) \
DecrTupleDescRefCount(tupdesc); \
} while (0)
-
+
extern bool equalTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2);
extern void TupleDescInitEntry(TupleDesc desc,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.16 2006/08/17 23:04:08 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.17 2006/10/04 00:30:07 momjian Exp $
*/
#ifndef XLOG_INTERNAL_H
#define XLOG_INTERNAL_H
extern const RmgrData RmgrTable[];
-/*
+/*
* Exported to support xlog switching from bgwriter
*/
extern time_t GetLastSegSwitchTime(void);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/bootstrap/bootstrap.h,v 1.43 2006/08/15 22:36:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/bootstrap/bootstrap.h,v 1.44 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void build_indices(void);
extern void boot_get_type_io_data(Oid typid,
- int16 *typlen,
- bool *typbyval,
- char *typalign,
- char *typdelim,
- Oid *typioparam,
- Oid *typinput,
- Oid *typoutput);
+ int16 *typlen,
+ bool *typbyval,
+ char *typalign,
+ char *typdelim,
+ Oid *typioparam,
+ Oid *typinput,
+ Oid *typoutput);
extern int boot_yyparse(void);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/c.h,v 1.213 2006/10/03 20:33:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/c.h,v 1.214 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "pg_config.h"
#include "pg_config_manual.h" /* must be after pg_config.h */
-#if !defined(WIN32) && !defined(__CYGWIN__) /* win32 will include further down */
+#if !defined(WIN32) && !defined(__CYGWIN__) /* win32 will include further
+ * down */
#include "pg_config_os.h" /* must be before any system header files */
#endif
#include "postgres_ext.h"
/*
* Support macros for escaping strings. escape_backslash should be TRUE
- * if generating a non-standard-conforming string. Prefixing a string
+ * if generating a non-standard-conforming string. Prefixing a string
* with ESCAPE_STRING_SYNTAX guarantees it is non-standard-conforming.
* Beware of multiple evaluation of the "ch" argument!
*/
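For illustration, a caller typically pairs these with a per-character test roughly as follows (a sketch; SQL_STR_DOUBLE is assumed to be the doubling macro the warning refers to, and src/dst are caller-provided buffers with enough room):

const char *p;

if (escape_backslash)
	*dst++ = ESCAPE_STRING_SYNTAX;	/* force non-standard-conforming interpretation */
*dst++ = '\'';
for (p = src; *p; p++)
{
	if (SQL_STR_DOUBLE(*p, escape_backslash))
		*dst++ = *p;				/* double quotes (and backslashes, if flagged) */
	*dst++ = *p;
}
*dst++ = '\'';
*dst = '\0';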
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.57 2006/07/28 18:33:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.58 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert ( 2803 int8inc - 0 20 "0" ));
/* var_pop */
-DATA(insert ( 2718 int8_accum numeric_var_pop 0 1231 "{0,0,0}" ));
-DATA(insert ( 2719 int4_accum numeric_var_pop 0 1231 "{0,0,0}" ));
-DATA(insert ( 2720 int2_accum numeric_var_pop 0 1231 "{0,0,0}" ));
+DATA(insert ( 2718 int8_accum numeric_var_pop 0 1231 "{0,0,0}" ));
+DATA(insert ( 2719 int4_accum numeric_var_pop 0 1231 "{0,0,0}" ));
+DATA(insert ( 2720 int2_accum numeric_var_pop 0 1231 "{0,0,0}" ));
DATA(insert ( 2721 float4_accum float8_var_pop 0 1022 "{0,0,0}" ));
DATA(insert ( 2722 float8_accum float8_var_pop 0 1022 "{0,0,0}" ));
DATA(insert ( 2723 numeric_accum numeric_var_pop 0 1231 "{0,0,0}" ));
DATA(insert ( 2714 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
DATA(insert ( 2715 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" ));
DATA(insert ( 2716 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" ));
-DATA(insert ( 2717 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
+DATA(insert ( 2717 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
/* stddev: historical Postgres syntax for stddev_samp */
DATA(insert ( 2154 int8_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
DATA(insert ( 2156 int2_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
DATA(insert ( 2157 float4_accum float8_stddev_samp 0 1022 "{0,0,0}" ));
DATA(insert ( 2158 float8_accum float8_stddev_samp 0 1022 "{0,0,0}" ));
-DATA(insert ( 2159 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
+DATA(insert ( 2159 numeric_accum numeric_stddev_samp 0 1231 "{0,0,0}" ));
/* SQL2003 binary regression aggregates */
DATA(insert ( 2818 int8inc_float8_float8 - 0 20 "0" ));
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.74 2006/09/10 00:29:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.75 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
/*
* gin _abstime_ops
*/
-DATA(insert ( 2753 0 1 f 2750 ));
-DATA(insert ( 2753 0 2 f 2751 ));
-DATA(insert ( 2753 0 3 t 2752 ));
+DATA(insert ( 2753 0 1 f 2750 ));
+DATA(insert ( 2753 0 2 f 2751 ));
+DATA(insert ( 2753 0 3 t 2752 ));
DATA(insert ( 2753 0 4 t 1070 ));
/*
* gin _bit_ops
*/
-DATA(insert ( 2754 0 1 f 2750 ));
-DATA(insert ( 2754 0 2 f 2751 ));
-DATA(insert ( 2754 0 3 t 2752 ));
+DATA(insert ( 2754 0 1 f 2750 ));
+DATA(insert ( 2754 0 2 f 2751 ));
+DATA(insert ( 2754 0 3 t 2752 ));
DATA(insert ( 2754 0 4 t 1070 ));
/*
* gin _bool_ops
*/
-DATA(insert ( 2755 0 1 f 2750 ));
-DATA(insert ( 2755 0 2 f 2751 ));
-DATA(insert ( 2755 0 3 t 2752 ));
+DATA(insert ( 2755 0 1 f 2750 ));
+DATA(insert ( 2755 0 2 f 2751 ));
+DATA(insert ( 2755 0 3 t 2752 ));
DATA(insert ( 2755 0 4 t 1070 ));
/*
* gin _bpchar_ops
*/
-DATA(insert ( 2756 0 1 f 2750 ));
-DATA(insert ( 2756 0 2 f 2751 ));
-DATA(insert ( 2756 0 3 t 2752 ));
+DATA(insert ( 2756 0 1 f 2750 ));
+DATA(insert ( 2756 0 2 f 2751 ));
+DATA(insert ( 2756 0 3 t 2752 ));
DATA(insert ( 2756 0 4 t 1070 ));
/*
* gin _bytea_ops
*/
-DATA(insert ( 2757 0 1 f 2750 ));
-DATA(insert ( 2757 0 2 f 2751 ));
-DATA(insert ( 2757 0 3 t 2752 ));
+DATA(insert ( 2757 0 1 f 2750 ));
+DATA(insert ( 2757 0 2 f 2751 ));
+DATA(insert ( 2757 0 3 t 2752 ));
DATA(insert ( 2757 0 4 t 1070 ));
/*
* gin _char_ops
*/
-DATA(insert ( 2758 0 1 f 2750 ));
-DATA(insert ( 2758 0 2 f 2751 ));
-DATA(insert ( 2758 0 3 t 2752 ));
+DATA(insert ( 2758 0 1 f 2750 ));
+DATA(insert ( 2758 0 2 f 2751 ));
+DATA(insert ( 2758 0 3 t 2752 ));
DATA(insert ( 2758 0 4 t 1070 ));
/*
* gin _cidr_ops
*/
-DATA(insert ( 2759 0 1 f 2750 ));
-DATA(insert ( 2759 0 2 f 2751 ));
-DATA(insert ( 2759 0 3 t 2752 ));
+DATA(insert ( 2759 0 1 f 2750 ));
+DATA(insert ( 2759 0 2 f 2751 ));
+DATA(insert ( 2759 0 3 t 2752 ));
DATA(insert ( 2759 0 4 t 1070 ));
/*
* gin _date_ops
*/
-DATA(insert ( 2760 0 1 f 2750 ));
-DATA(insert ( 2760 0 2 f 2751 ));
-DATA(insert ( 2760 0 3 t 2752 ));
+DATA(insert ( 2760 0 1 f 2750 ));
+DATA(insert ( 2760 0 2 f 2751 ));
+DATA(insert ( 2760 0 3 t 2752 ));
DATA(insert ( 2760 0 4 t 1070 ));
/*
* gin _float4_ops
*/
-DATA(insert ( 2761 0 1 f 2750 ));
-DATA(insert ( 2761 0 2 f 2751 ));
-DATA(insert ( 2761 0 3 t 2752 ));
+DATA(insert ( 2761 0 1 f 2750 ));
+DATA(insert ( 2761 0 2 f 2751 ));
+DATA(insert ( 2761 0 3 t 2752 ));
DATA(insert ( 2761 0 4 t 1070 ));
/*
* gin _float8_ops
*/
-DATA(insert ( 2762 0 1 f 2750 ));
-DATA(insert ( 2762 0 2 f 2751 ));
-DATA(insert ( 2762 0 3 t 2752 ));
+DATA(insert ( 2762 0 1 f 2750 ));
+DATA(insert ( 2762 0 2 f 2751 ));
+DATA(insert ( 2762 0 3 t 2752 ));
DATA(insert ( 2762 0 4 t 1070 ));
/*
* gin _inet_ops
*/
-DATA(insert ( 2763 0 1 f 2750 ));
-DATA(insert ( 2763 0 2 f 2751 ));
-DATA(insert ( 2763 0 3 t 2752 ));
+DATA(insert ( 2763 0 1 f 2750 ));
+DATA(insert ( 2763 0 2 f 2751 ));
+DATA(insert ( 2763 0 3 t 2752 ));
DATA(insert ( 2763 0 4 t 1070 ));
/*
* gin _int2_ops
*/
-DATA(insert ( 2764 0 1 f 2750 ));
-DATA(insert ( 2764 0 2 f 2751 ));
-DATA(insert ( 2764 0 3 t 2752 ));
+DATA(insert ( 2764 0 1 f 2750 ));
+DATA(insert ( 2764 0 2 f 2751 ));
+DATA(insert ( 2764 0 3 t 2752 ));
DATA(insert ( 2764 0 4 t 1070 ));
/*
* gin _int8_ops
*/
-DATA(insert ( 2765 0 1 f 2750 ));
-DATA(insert ( 2765 0 2 f 2751 ));
-DATA(insert ( 2765 0 3 t 2752 ));
+DATA(insert ( 2765 0 1 f 2750 ));
+DATA(insert ( 2765 0 2 f 2751 ));
+DATA(insert ( 2765 0 3 t 2752 ));
DATA(insert ( 2765 0 4 t 1070 ));
/*
* gin _interval_ops
*/
-DATA(insert ( 2766 0 1 f 2750 ));
-DATA(insert ( 2766 0 2 f 2751 ));
-DATA(insert ( 2766 0 3 t 2752 ));
+DATA(insert ( 2766 0 1 f 2750 ));
+DATA(insert ( 2766 0 2 f 2751 ));
+DATA(insert ( 2766 0 3 t 2752 ));
DATA(insert ( 2766 0 4 t 1070 ));
/*
* gin _macaddr_ops
*/
-DATA(insert ( 2767 0 1 f 2750 ));
-DATA(insert ( 2767 0 2 f 2751 ));
-DATA(insert ( 2767 0 3 t 2752 ));
+DATA(insert ( 2767 0 1 f 2750 ));
+DATA(insert ( 2767 0 2 f 2751 ));
+DATA(insert ( 2767 0 3 t 2752 ));
DATA(insert ( 2767 0 4 t 1070 ));
/*
* gin _name_ops
*/
-DATA(insert ( 2768 0 1 f 2750 ));
-DATA(insert ( 2768 0 2 f 2751 ));
-DATA(insert ( 2768 0 3 t 2752 ));
+DATA(insert ( 2768 0 1 f 2750 ));
+DATA(insert ( 2768 0 2 f 2751 ));
+DATA(insert ( 2768 0 3 t 2752 ));
DATA(insert ( 2768 0 4 t 1070 ));
/*
* gin _numeric_ops
*/
-DATA(insert ( 2769 0 1 f 2750 ));
-DATA(insert ( 2769 0 2 f 2751 ));
-DATA(insert ( 2769 0 3 t 2752 ));
+DATA(insert ( 2769 0 1 f 2750 ));
+DATA(insert ( 2769 0 2 f 2751 ));
+DATA(insert ( 2769 0 3 t 2752 ));
DATA(insert ( 2769 0 4 t 1070 ));
/*
* gin _oid_ops
*/
-DATA(insert ( 2770 0 1 f 2750 ));
-DATA(insert ( 2770 0 2 f 2751 ));
-DATA(insert ( 2770 0 3 t 2752 ));
+DATA(insert ( 2770 0 1 f 2750 ));
+DATA(insert ( 2770 0 2 f 2751 ));
+DATA(insert ( 2770 0 3 t 2752 ));
DATA(insert ( 2770 0 4 t 1070 ));
/*
* gin _oidvector_ops
*/
-DATA(insert ( 2771 0 1 f 2750 ));
-DATA(insert ( 2771 0 2 f 2751 ));
-DATA(insert ( 2771 0 3 t 2752 ));
+DATA(insert ( 2771 0 1 f 2750 ));
+DATA(insert ( 2771 0 2 f 2751 ));
+DATA(insert ( 2771 0 3 t 2752 ));
DATA(insert ( 2771 0 4 t 1070 ));
/*
* gin _time_ops
*/
-DATA(insert ( 2772 0 1 f 2750 ));
-DATA(insert ( 2772 0 2 f 2751 ));
-DATA(insert ( 2772 0 3 t 2752 ));
+DATA(insert ( 2772 0 1 f 2750 ));
+DATA(insert ( 2772 0 2 f 2751 ));
+DATA(insert ( 2772 0 3 t 2752 ));
DATA(insert ( 2772 0 4 t 1070 ));
/*
* gin _timestamptz_ops
*/
-DATA(insert ( 2773 0 1 f 2750 ));
-DATA(insert ( 2773 0 2 f 2751 ));
-DATA(insert ( 2773 0 3 t 2752 ));
+DATA(insert ( 2773 0 1 f 2750 ));
+DATA(insert ( 2773 0 2 f 2751 ));
+DATA(insert ( 2773 0 3 t 2752 ));
DATA(insert ( 2773 0 4 t 1070 ));
/*
* gin _timetz_ops
*/
-DATA(insert ( 2774 0 1 f 2750 ));
-DATA(insert ( 2774 0 2 f 2751 ));
-DATA(insert ( 2774 0 3 t 2752 ));
+DATA(insert ( 2774 0 1 f 2750 ));
+DATA(insert ( 2774 0 2 f 2751 ));
+DATA(insert ( 2774 0 3 t 2752 ));
DATA(insert ( 2774 0 4 t 1070 ));
/*
* gin _varbit_ops
*/
-DATA(insert ( 2775 0 1 f 2750 ));
-DATA(insert ( 2775 0 2 f 2751 ));
-DATA(insert ( 2775 0 3 t 2752 ));
+DATA(insert ( 2775 0 1 f 2750 ));
+DATA(insert ( 2775 0 2 f 2751 ));
+DATA(insert ( 2775 0 3 t 2752 ));
DATA(insert ( 2775 0 4 t 1070 ));
/*
* gin _varchar_ops
*/
-DATA(insert ( 2776 0 1 f 2750 ));
-DATA(insert ( 2776 0 2 f 2751 ));
-DATA(insert ( 2776 0 3 t 2752 ));
+DATA(insert ( 2776 0 1 f 2750 ));
+DATA(insert ( 2776 0 2 f 2751 ));
+DATA(insert ( 2776 0 3 t 2752 ));
DATA(insert ( 2776 0 4 t 1070 ));
/*
* gin _timestamp_ops
*/
-DATA(insert ( 2777 0 1 f 2750 ));
-DATA(insert ( 2777 0 2 f 2751 ));
-DATA(insert ( 2777 0 3 t 2752 ));
+DATA(insert ( 2777 0 1 f 2750 ));
+DATA(insert ( 2777 0 2 f 2751 ));
+DATA(insert ( 2777 0 3 t 2752 ));
DATA(insert ( 2777 0 4 t 1070 ));
/*
* gin _money_ops
*/
-DATA(insert ( 2778 0 1 f 2750 ));
-DATA(insert ( 2778 0 2 f 2751 ));
-DATA(insert ( 2778 0 3 t 2752 ));
+DATA(insert ( 2778 0 1 f 2750 ));
+DATA(insert ( 2778 0 2 f 2751 ));
+DATA(insert ( 2778 0 3 t 2752 ));
DATA(insert ( 2778 0 4 t 1070 ));
/*
* gin _reltime_ops
*/
-DATA(insert ( 2779 0 1 f 2750 ));
-DATA(insert ( 2779 0 2 f 2751 ));
-DATA(insert ( 2779 0 3 t 2752 ));
+DATA(insert ( 2779 0 1 f 2750 ));
+DATA(insert ( 2779 0 2 f 2751 ));
+DATA(insert ( 2779 0 3 t 2752 ));
DATA(insert ( 2779 0 4 t 1070 ));
/*
* gin _tinterval_ops
*/
-DATA(insert ( 2780 0 1 f 2750 ));
-DATA(insert ( 2780 0 2 f 2751 ));
-DATA(insert ( 2780 0 3 t 2752 ));
+DATA(insert ( 2780 0 1 f 2750 ));
+DATA(insert ( 2780 0 2 f 2751 ));
+DATA(insert ( 2780 0 3 t 2752 ));
DATA(insert ( 2780 0 4 t 1070 ));
#endif /* PG_AMOP_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.59 2006/07/21 20:51:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.60 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert ( 2099 0 1 377 ));
DATA(insert ( 2233 0 1 380 ));
DATA(insert ( 2234 0 1 381 ));
-DATA(insert ( 2789 0 1 2794 ));
+DATA(insert ( 2789 0 1 2794 ));
/* hash */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.124 2006/08/25 04:06:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.125 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
{ 1259, {"relhaspkey"}, 16, -1, 1, 22, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
{ 1259, {"relhasrules"}, 16, -1, 1, 23, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
{ 1259, {"relhassubclass"},16, -1, 1, 24, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1259, {"relminxid"}, 28, -1, 4, 25, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
+{ 1259, {"relminxid"}, 28, -1, 4, 25, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"relvacuumxid"}, 28, -1, 4, 26, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"relacl"}, 1034, -1, -1, 27, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1259, {"reloptions"}, 1009, -1, -1, 28, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.95 2006/07/10 16:20:51 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.96 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
bool relhasrules; /* has associated rules */
bool relhassubclass; /* has derived classes */
TransactionId relminxid; /* minimum Xid present in table */
- TransactionId relvacuumxid; /* Xid used as last vacuum OldestXmin */
+ TransactionId relvacuumxid; /* Xid used as last vacuum OldestXmin */
/*
* VARIABLE LENGTH FIELDS start here. These fields may be NULL, too.
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.32 2006/08/21 16:16:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.33 2006/10/04 00:30:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
CheckPoint checkPointCopy; /* copy of last check point record */
- XLogRecPtr minRecoveryPoint; /* must replay xlog to here */
+ XLogRecPtr minRecoveryPoint; /* must replay xlog to here */
/*
* This data is used to check for hardware-architecture compatibility of
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_index.h,v 1.40 2006/08/25 04:06:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_index.h,v 1.41 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
bool indisunique; /* is this a unique index? */
bool indisprimary; /* is this index for primary key? */
bool indisclustered; /* is this the index last clustered by? */
- bool indisvalid; /* is this index valid for use by queries? */
+ bool indisvalid; /* is this index valid for use by queries? */
/* VARIABLE LENGTH FIELDS: */
int2vector indkey; /* column numbers of indexed cols, or 0 */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.145 2006/09/10 00:29:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.146 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert OID = 387 ( "=" PGNSP PGUID b f 27 27 16 387 402 2799 2799 2799 2800 tideq eqsel eqjoinsel ));
#define TIDEqualOperator 387
DATA(insert OID = 402 ( "<>" PGNSP PGUID b f 27 27 16 402 387 0 0 0 0 tidne neqsel neqjoinsel ));
-DATA(insert OID = 2799 ( "<" PGNSP PGUID b f 27 27 16 2800 2802 0 0 0 0 tidlt scalarltsel scalarltjoinsel ));
+DATA(insert OID = 2799 ( "<" PGNSP PGUID b f 27 27 16 2800 2802 0 0 0 0 tidlt scalarltsel scalarltjoinsel ));
#define TIDLessOperator 2799
-DATA(insert OID = 2800 ( ">" PGNSP PGUID b f 27 27 16 2799 2801 0 0 0 0 tidgt scalargtsel scalargtjoinsel ));
-DATA(insert OID = 2801 ( "<=" PGNSP PGUID b f 27 27 16 2802 2800 0 0 0 0 tidle scalarltsel scalarltjoinsel ));
-DATA(insert OID = 2802 ( ">=" PGNSP PGUID b f 27 27 16 2801 2799 0 0 0 0 tidge scalargtsel scalargtjoinsel ));
+DATA(insert OID = 2800 ( ">" PGNSP PGUID b f 27 27 16 2799 2801 0 0 0 0 tidgt scalargtsel scalargtjoinsel ));
+DATA(insert OID = 2801 ( "<=" PGNSP PGUID b f 27 27 16 2802 2800 0 0 0 0 tidle scalarltsel scalarltjoinsel ));
+DATA(insert OID = 2802 ( ">=" PGNSP PGUID b f 27 27 16 2801 2799 0 0 0 0 tidge scalargtsel scalargtjoinsel ));
DATA(insert OID = 410 ( "=" PGNSP PGUID b t 20 20 16 410 411 412 412 412 413 int8eq eqsel eqjoinsel ));
DATA(insert OID = 411 ( "<>" PGNSP PGUID b f 20 20 16 411 410 0 0 0 0 int8ne neqsel neqjoinsel ));
DATA(insert OID = 2590 ( "|&>" PGNSP PGUID b f 718 718 16 0 0 0 0 0 0 circle_overabove positionsel positionjoinsel ));
/* overlap/contains/contained for arrays */
-DATA(insert OID = 2750 ( "&&" PGNSP PGUID b f 2277 2277 16 2750 0 0 0 0 0 arrayoverlap areasel areajoinsel ));
-DATA(insert OID = 2751 ( "@>" PGNSP PGUID b f 2277 2277 16 2752 0 0 0 0 0 arraycontains contsel contjoinsel ));
-DATA(insert OID = 2752 ( "<@" PGNSP PGUID b f 2277 2277 16 2751 0 0 0 0 0 arraycontained contsel contjoinsel ));
+DATA(insert OID = 2750 ( "&&" PGNSP PGUID b f 2277 2277 16 2750 0 0 0 0 0 arrayoverlap areasel areajoinsel ));
+DATA(insert OID = 2751 ( "@>" PGNSP PGUID b f 2277 2277 16 2752 0 0 0 0 0 arraycontains contsel contjoinsel ));
+DATA(insert OID = 2752 ( "<@" PGNSP PGUID b f 2277 2277 16 2751 0 0 0 0 0 arraycontained contsel contjoinsel ));
/* obsolete names for contains/contained-by operators; remove these someday */
-DATA(insert OID = 2860 ( "@" PGNSP PGUID b f 604 604 16 2861 0 0 0 0 0 poly_contained contsel contjoinsel ));
-DATA(insert OID = 2861 ( "~" PGNSP PGUID b f 604 604 16 2860 0 0 0 0 0 poly_contain contsel contjoinsel ));
-DATA(insert OID = 2862 ( "@" PGNSP PGUID b f 603 603 16 2863 0 0 0 0 0 box_contained contsel contjoinsel ));
-DATA(insert OID = 2863 ( "~" PGNSP PGUID b f 603 603 16 2862 0 0 0 0 0 box_contain contsel contjoinsel ));
+DATA(insert OID = 2860 ( "@" PGNSP PGUID b f 604 604 16 2861 0 0 0 0 0 poly_contained contsel contjoinsel ));
+DATA(insert OID = 2861 ( "~" PGNSP PGUID b f 604 604 16 2860 0 0 0 0 0 poly_contain contsel contjoinsel ));
+DATA(insert OID = 2862 ( "@" PGNSP PGUID b f 603 603 16 2863 0 0 0 0 0 box_contained contsel contjoinsel ));
+DATA(insert OID = 2863 ( "~" PGNSP PGUID b f 603 603 16 2862 0 0 0 0 0 box_contain contsel contjoinsel ));
DATA(insert OID = 2864 ( "@" PGNSP PGUID b f 718 718 16 2865 0 0 0 0 0 circle_contained contsel contjoinsel ));
DATA(insert OID = 2865 ( "~" PGNSP PGUID b f 718 718 16 2864 0 0 0 0 0 circle_contain contsel contjoinsel ));
DATA(insert OID = 2866 ( "@" PGNSP PGUID b f 600 603 16 0 0 0 0 0 0 on_pb - - ));
-DATA(insert OID = 2867 ( "@" PGNSP PGUID b f 600 602 16 2868 0 0 0 0 0 on_ppath - - ));
+DATA(insert OID = 2867 ( "@" PGNSP PGUID b f 600 602 16 2868 0 0 0 0 0 on_ppath - - ));
DATA(insert OID = 2868 ( "~" PGNSP PGUID b f 602 600 16 2867 0 0 0 0 0 path_contain_pt - - ));
DATA(insert OID = 2869 ( "@" PGNSP PGUID b f 600 604 16 2870 0 0 0 0 0 pt_contained_poly - - ));
DATA(insert OID = 2870 ( "~" PGNSP PGUID b f 604 600 16 2869 0 0 0 0 0 poly_contain_pt - - ));
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.426 2006/09/18 22:40:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.427 2006/10/04 00:30:07 momjian Exp $
*
* NOTES
* The script catalog/genbki.sh reads this file and generates .bki
DATA(insert OID = 1219 ( int8inc PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8inc - _null_ ));
DESCR("increment");
-DATA(insert OID = 2804 ( int8inc_any PGNSP PGUID 12 f f t f i 2 20 "20 2276" _null_ _null_ _null_ int8inc_any - _null_ ));
+DATA(insert OID = 2804 ( int8inc_any PGNSP PGUID 12 f f t f i 2 20 "20 2276" _null_ _null_ _null_ int8inc_any - _null_ ));
DESCR("increment, ignores second argument");
DATA(insert OID = 1230 ( int8abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ _null_ _null_ int8abs - _null_ ));
DESCR("absolute value");
DESCR("convert time with time zone and date to timestamp with time zone");
DATA(insert OID = 1299 ( now PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ now - _null_ ));
DESCR("current transaction time");
-DATA(insert OID = 2647 ( transaction_timestamp PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ now - _null_ ));
+DATA(insert OID = 2647 ( transaction_timestamp PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ now - _null_ ));
DESCR("current transaction time");
-DATA(insert OID = 2648 ( statement_timestamp PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ statement_timestamp - _null_ ));
+DATA(insert OID = 2648 ( statement_timestamp PGNSP PGUID 12 f f t f s 0 1184 "" _null_ _null_ _null_ statement_timestamp - _null_ ));
DESCR("current statement time");
-DATA(insert OID = 2649 ( clock_timestamp PGNSP PGUID 12 f f t f v 0 1184 "" _null_ _null_ _null_ clock_timestamp - _null_ ));
+DATA(insert OID = 2649 ( clock_timestamp PGNSP PGUID 12 f f t f v 0 1184 "" _null_ _null_ _null_ clock_timestamp - _null_ ));
DESCR("current clock time");
/* OIDS 1300 - 1399 */
DESCR("VAR_SAMP aggregate final function");
DATA(insert OID = 2513 ( float8_stddev_pop PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ _null_ _null_ float8_stddev_pop - _null_ ));
DESCR("STDDEV_POP aggregate final function");
-DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ _null_ _null_ float8_stddev_samp - _null_ ));
+DATA(insert OID = 1832 ( float8_stddev_samp PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ _null_ _null_ float8_stddev_samp - _null_ ));
DESCR("STDDEV_SAMP aggregate final function");
DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 1700" _null_ _null_ _null_ numeric_accum - _null_ ));
DESCR("aggregate transition function");
DESCR("VAR_SAMP aggregate final function");
DATA(insert OID = 2596 ( numeric_stddev_pop PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_stddev_pop - _null_ ));
DESCR("STDDEV_POP aggregate final function");
-DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_stddev_samp - _null_ ));
+DATA(insert OID = 1839 ( numeric_stddev_samp PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ _null_ _null_ numeric_stddev_samp - _null_ ));
DESCR("STDDEV_SAMP aggregate final function");
DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 f f f f i 2 20 "20 21" _null_ _null_ _null_ int2_sum - _null_ ));
DESCR("SUM(int2) transition function");
DESCR("gin(internal)");
/* GIN array support */
-DATA(insert OID = 2743 ( ginarrayextract PGNSP PGUID 12 f f t f i 2 2281 "2277 2281" _null_ _null_ _null_ ginarrayextract - _null_ ));
+DATA(insert OID = 2743 ( ginarrayextract PGNSP PGUID 12 f f t f i 2 2281 "2277 2281" _null_ _null_ _null_ ginarrayextract - _null_ ));
DESCR("GIN array support");
DATA(insert OID = 2744 ( ginarrayconsistent PGNSP PGUID 12 f f t f i 3 16 "2281 21 2281" _null_ _null_ _null_ ginarrayconsistent - _null_ ));
DESCR("GIN array support");
* NOTE: an object is identified by the OID of the row that primarily
* defines the object, plus the OID of the table that that row appears in.
* For example, a database is identified by the OID of its pg_database row
- * plus the pg_class OID of table pg_database. This allows unique
+ * plus the pg_class OID of table pg_database. This allows unique
* identification of objects without assuming that OIDs are unique
* across tables.
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_shdescription.h,v 1.2 2006/03/05 15:58:55 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_shdescription.h,v 1.3 2006/10/04 00:30:08 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* compiler constants for pg_shdescription
* ----------------
*/
-#define Natts_pg_shdescription 3
+#define Natts_pg_shdescription 3
#define Anum_pg_shdescription_objoid 1
#define Anum_pg_shdescription_classoid 2
#define Anum_pg_shdescription_description 3
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.171 2006/04/05 22:11:57 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.172 2006/10/04 00:30:08 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert OID = 2209 ( _regoperator PGNSP PGUID -1 f b t \054 0 2204 array_in array_out array_recv array_send - i x f 0 -1 0 _null_ _null_ ));
DATA(insert OID = 2210 ( _regclass PGNSP PGUID -1 f b t \054 0 2205 array_in array_out array_recv array_send - i x f 0 -1 0 _null_ _null_ ));
DATA(insert OID = 2211 ( _regtype PGNSP PGUID -1 f b t \054 0 2206 array_in array_out array_recv array_send - i x f 0 -1 0 _null_ _null_ ));
-#define REGTYPEARRAYOID 2211
+#define REGTYPEARRAYOID 2211
/*
* pseudo-types
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/toasting.h,v 1.1 2006/07/31 01:16:37 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/toasting.h,v 1.2 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
extern void AlterTableCreateToastTable(Oid relOid);
extern void BootstrapToastTable(char *relName,
- Oid toastOid, Oid toastIndexOid);
+ Oid toastOid, Oid toastIndexOid);
/*
*/
/* normal catalogs */
-DECLARE_TOAST(pg_attrdef,2830,2831);
-DECLARE_TOAST(pg_constraint,2832,2833);
-DECLARE_TOAST(pg_description,2834,2835);
-DECLARE_TOAST(pg_proc,2836,2837);
-DECLARE_TOAST(pg_rewrite,2838,2839);
-DECLARE_TOAST(pg_statistic,2840,2841);
+DECLARE_TOAST(pg_attrdef, 2830, 2831);
+DECLARE_TOAST(pg_constraint, 2832, 2833);
+DECLARE_TOAST(pg_description, 2834, 2835);
+DECLARE_TOAST(pg_proc, 2836, 2837);
+DECLARE_TOAST(pg_rewrite, 2838, 2839);
+DECLARE_TOAST(pg_statistic, 2840, 2841);
/* shared catalogs */
-DECLARE_TOAST(pg_authid,2842,2843);
+DECLARE_TOAST(pg_authid, 2842, 2843);
#define PgAuthidToastTable 2842
#define PgAuthidToastIndex 2843
-DECLARE_TOAST(pg_database,2844,2845);
+DECLARE_TOAST(pg_database, 2844, 2845);
#define PgDatabaseToastTable 2844
#define PgDatabaseToastIndex 2845
-DECLARE_TOAST(pg_shdescription,2846,2847);
+DECLARE_TOAST(pg_shdescription, 2846, 2847);
#define PgShdescriptionToastTable 2846
#define PgShdescriptionToastIndex 2847
* related routines. CommentObject() implements the SQL "COMMENT ON"
* command. DeleteComments() deletes all comments for an object.
* CreateComments creates (or deletes, if comment is NULL) a comment
- * for a specific key. There are versions of these two methods for
+ * for a specific key. There are versions of these two methods for
* both normal and shared objects.
*------------------------------------------------------------------
*/
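For illustration, attaching and then dropping a comment on a function by hand would look roughly like this (a sketch; the signatures are assumed from this header's declarations, and funcOid is a hypothetical pg_proc OID):

/* store (or replace) the comment keyed by the function's pg_proc row */
CreateComments(funcOid, ProcedureRelationId, 0, "frobnicates its argument");

/* delete every comment attached to that function */
DeleteComments(funcOid, ProcedureRelationId, 0);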
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.76 2006/08/25 04:06:55 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.77 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* commands/aggregatecmds.c */
extern void DefineAggregate(List *name, List *args, bool oldstyle,
- List *parameters);
+ List *parameters);
extern void RemoveAggregate(RemoveFuncStmt *stmt);
extern void RenameAggregate(List *name, List *args, const char *newname);
extern void AlterAggregateOwner(List *name, List *args, Oid newOwnerId);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.27 2006/07/13 16:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/explain.h,v 1.28 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ExplainQuery(ExplainStmt *stmt, ParamListInfo params,
- DestReceiver *dest);
+ DestReceiver *dest);
extern TupleDesc ExplainResultDesc(ExplainStmt *stmt);
*
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.21 2006/07/13 16:49:19 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.22 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct
{
/* dynahash.c requires key to be first field */
- char stmt_name[NAMEDATALEN];
- char *query_string; /* text of query, or NULL */
- const char *commandTag; /* command tag (a constant!), or NULL */
- List *query_list; /* list of queries, rewritten */
- List *plan_list; /* list of plans */
- List *argtype_list; /* list of parameter type OIDs */
- TimestampTz prepare_time; /* the time when the stmt was prepared */
- bool from_sql; /* stmt prepared via SQL, not
- * FE/BE protocol? */
- MemoryContext context; /* context containing this query */
+ char stmt_name[NAMEDATALEN];
+ char *query_string; /* text of query, or NULL */
+ const char *commandTag; /* command tag (a constant!), or NULL */
+ List *query_list; /* list of queries, rewritten */
+ List *plan_list; /* list of plans */
+ List *argtype_list; /* list of parameter type OIDs */
+ TimestampTz prepare_time; /* the time when the stmt was prepared */
+ bool from_sql; /* stmt prepared via SQL, not FE/BE protocol? */
+ MemoryContext context; /* context containing this query */
} PreparedStatement;
/* Utility statements PREPARE, EXECUTE, DEALLOCATE, EXPLAIN EXECUTE */
extern void PrepareQuery(PrepareStmt *stmt);
extern void ExecuteQuery(ExecuteStmt *stmt, ParamListInfo params,
- DestReceiver *dest, char *completionTag);
+ DestReceiver *dest, char *completionTag);
extern void DeallocateQuery(DeallocateStmt *stmt);
extern void ExplainExecuteQuery(ExplainStmt *stmt, ParamListInfo params,
- TupOutputState *tstate);
+ TupOutputState *tstate);
/* Low-level access to stored prepared statements */
extern void StorePreparedStatement(const char *stmt_name,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.30 2006/07/31 01:16:38 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.31 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void renamerel(Oid myrelid,
const char *newrelname);
-extern AttrNumber * varattnos_map(TupleDesc old, TupleDesc new);
-extern AttrNumber * varattnos_map_schema(TupleDesc old, List *schema);
+extern AttrNumber *varattnos_map(TupleDesc old, TupleDesc new);
+extern AttrNumber *varattnos_map_schema(TupleDesc old, List *schema);
extern void change_varattnos_of_a_node(Node *node, const AttrNumber *newattno);
extern void register_on_commit_action(Oid relid, OnCommitAction action);
* Commands for manipulating roles (formerly called users).
*
*
- * $PostgreSQL: pgsql/src/include/commands/user.h,v 1.29 2005/11/22 18:17:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/user.h,v 1.30 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void DropRole(DropRoleStmt *stmt);
extern void GrantRole(GrantRoleStmt *stmt);
extern void RenameRole(const char *oldname, const char *newname);
-extern void DropOwnedObjects(DropOwnedStmt * stmt);
-extern void ReassignOwnedObjects(ReassignOwnedStmt * stmt);
+extern void DropOwnedObjects(DropOwnedStmt *stmt);
+extern void ReassignOwnedObjects(ReassignOwnedStmt *stmt);
#endif /* USER_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.129 2006/08/12 02:52:06 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.130 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* REWIND indicates that the plan node should try to efficiently support
* rescans without parameter changes. (Nodes must support ExecReScan calls
* in any case, but if this flag was not given, they are at liberty to do it
- * through complete recalculation. Note that a parameter change forces a
+ * through complete recalculation. Note that a parameter change forces a
* full recalculation in any case.)
*
* BACKWARD indicates that the plan node must respect the es_direction flag.
* MARK indicates that the plan node must support Mark/Restore calls.
* When this is not passed, no Mark/Restore will occur.
*/
-#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */
-#define EXEC_FLAG_REWIND 0x0002 /* need efficient rescan */
-#define EXEC_FLAG_BACKWARD 0x0004 /* need backward scan */
-#define EXEC_FLAG_MARK 0x0008 /* need mark/restore */
+#define EXEC_FLAG_EXPLAIN_ONLY 0x0001 /* EXPLAIN, no ANALYZE */
+#define EXEC_FLAG_REWIND 0x0002 /* need efficient rescan */
+#define EXEC_FLAG_BACKWARD 0x0004 /* need backward scan */
+#define EXEC_FLAG_MARK 0x0008 /* need mark/restore */
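For illustration, an ExecInit routine typically inspects these flags roughly as in the sketch below (my_open_resources and the node type are hypothetical):

/* this node type supports neither backward scan nor mark/restore */
Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));

/* skip expensive setup when only an EXPLAIN (without ANALYZE) is wanted */
if (!(eflags & EXEC_FLAG_EXPLAIN_ONLY))
	my_open_resources(node);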
/*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeIndexscan.h,v 1.28 2006/03/05 15:58:56 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeIndexscan.h,v 1.29 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
IndexRuntimeKeyInfo **runtimeKeys, int *numRuntimeKeys,
IndexArrayKeyInfo **arrayKeys, int *numArrayKeys);
extern void ExecIndexEvalRuntimeKeys(ExprContext *econtext,
- IndexRuntimeKeyInfo *runtimeKeys, int numRuntimeKeys);
+ IndexRuntimeKeyInfo *runtimeKeys, int numRuntimeKeys);
extern bool ExecIndexEvalArrayKeys(ExprContext *econtext,
- IndexArrayKeyInfo *arrayKeys, int numArrayKeys);
+ IndexArrayKeyInfo *arrayKeys, int numArrayKeys);
extern bool ExecIndexAdvanceArrayKeys(IndexArrayKeyInfo *arrayKeys, int numArrayKeys);
#endif /* NODEINDEXSCAN_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/nodeValuesscan.h,v 1.1 2006/08/02 01:59:47 joe Exp $
+ * $PostgreSQL: pgsql/src/include/executor/nodeValuesscan.h,v 1.2 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "nodes/execnodes.h"
-extern int ExecCountSlotsValuesScan(ValuesScan *node);
+extern int ExecCountSlotsValuesScan(ValuesScan *node);
extern ValuesScanState *ExecInitValuesScan(ValuesScan *node, EState *estate, int eflags);
extern TupleTableSlot *ExecValuesScan(ValuesScanState *node);
extern void ExecEndValuesScan(ValuesScanState *node);
*
* spi.h
*
- * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.57 2006/09/03 03:19:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.58 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define SPI_OK_DELETE 8
#define SPI_OK_UPDATE 9
#define SPI_OK_CURSOR 10
-#define SPI_OK_INSERT_RETURNING 11
-#define SPI_OK_DELETE_RETURNING 12
-#define SPI_OK_UPDATE_RETURNING 13
+#define SPI_OK_INSERT_RETURNING 11
+#define SPI_OK_DELETE_RETURNING 12
+#define SPI_OK_UPDATE_RETURNING 13
extern DLLIMPORT uint32 SPI_processed;
extern DLLIMPORT Oid SPI_lastoid;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/tuptable.h,v 1.35 2006/07/13 17:47:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/tuptable.h,v 1.36 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* A "minimal" tuple is handled similarly to a palloc'd regular tuple.
* At present, minimal tuples never are stored in buffers, so there is no
- * parallel to case 1. Note that a minimal tuple has no "system columns".
+ * parallel to case 1. Note that a minimal tuple has no "system columns".
* (Actually, it could have an OID, but we have no need to access the OID.)
*
* A "virtual" tuple is an optimization used to minimize physical data
* object (the thing to be pfree'd if tts_shouldFree is true). In this case
* tts_tuple points at tts_minhdr and the fields of that are set correctly
* for access to the minimal tuple; in particular, tts_minhdr.t_data points
- * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. (tts_mintuple is therefore
+ * MINIMAL_TUPLE_OFFSET bytes before tts_mintuple. (tts_mintuple is therefore
* redundant, but for code simplicity we store it explicitly anyway.) This
* case otherwise behaves identically to the regular-physical-tuple case.
*
Buffer buffer,
bool shouldFree);
extern TupleTableSlot *ExecStoreMinimalTuple(MinimalTuple mtup,
- TupleTableSlot *slot,
- bool shouldFree);
+ TupleTableSlot *slot,
+ bool shouldFree);
extern TupleTableSlot *ExecClearTuple(TupleTableSlot *slot);
extern TupleTableSlot *ExecStoreVirtualTuple(TupleTableSlot *slot);
extern TupleTableSlot *ExecStoreAllNullTuple(TupleTableSlot *slot);
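For illustration, routing a tuple through its minimal form into a slot looks roughly like this (a sketch; tupdesc, values, isnull and slot are assumed to be set up by the caller):

MinimalTuple mtup = heap_form_minimal_tuple(tupdesc, values, isnull);

/* the slot takes ownership and will free the tuple when it is cleared */
ExecStoreMinimalTuple(mtup, slot, true);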
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.47 2006/08/16 04:32:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/fmgr.h,v 1.48 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* We don't want to include primnodes.h here, so make a stub reference */
typedef struct Node *fmNodePtr;
+
/* Likewise, avoid including stringinfo.h here */
typedef struct StringInfoData *fmStringInfo;
* To compile with versions of PostgreSQL that do not support this,
* you may put an #ifdef/#endif test around it. Note that in a multiple-
* source-file module, the macro call should only appear once.
- *
+ *
* The specific items included in the magic block are intended to be ones that
* are custom-configurable and especially likely to break dynamically loaded
* modules if they were compiled with other values. Also, the length field
/* Definition of the magic block structure */
typedef struct
{
- int len; /* sizeof(this struct) */
- int version; /* PostgreSQL major version */
- int funcmaxargs; /* FUNC_MAX_ARGS */
- int indexmaxkeys; /* INDEX_MAX_KEYS */
- int namedatalen; /* NAMEDATALEN */
+ int len; /* sizeof(this struct) */
+ int version; /* PostgreSQL major version */
+ int funcmaxargs; /* FUNC_MAX_ARGS */
+ int indexmaxkeys; /* INDEX_MAX_KEYS */
+ int namedatalen; /* NAMEDATALEN */
} Pg_magic_struct;
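A hedged example of the usage the comment above describes: a loadable module emits the magic block exactly once, alongside its V1-convention functions (add_one is a made-up function for illustration).

#include "postgres.h"
#include "fmgr.h"

PG_MODULE_MAGIC;				/* emits a Pg_magic_struct that the server
								 * checks when the module is loaded */

PG_FUNCTION_INFO_V1(add_one);
Datum		add_one(PG_FUNCTION_ARGS);

Datum
add_one(PG_FUNCTION_ARGS)
{
	int32		arg = PG_GETARG_INT32(0);

	PG_RETURN_INT32(arg + 1);
}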
/* The actual data block contents */
/* Special cases for convenient invocation of datatype I/O functions. */
extern Datum InputFunctionCall(FmgrInfo *flinfo, char *str,
- Oid typioparam, int32 typmod);
+ Oid typioparam, int32 typmod);
extern Datum OidInputFunctionCall(Oid functionId, char *str,
- Oid typioparam, int32 typmod);
+ Oid typioparam, int32 typmod);
extern char *OutputFunctionCall(FmgrInfo *flinfo, Datum val);
extern char *OidOutputFunctionCall(Oid functionId, Datum val);
extern Datum ReceiveFunctionCall(FmgrInfo *flinfo, fmStringInfo buf,
- Oid typioparam, int32 typmod);
+ Oid typioparam, int32 typmod);
extern Datum OidReceiveFunctionCall(Oid functionId, fmStringInfo buf,
- Oid typioparam, int32 typmod);
+ Oid typioparam, int32 typmod);
extern bytea *SendFunctionCall(FmgrInfo *flinfo, Datum val);
extern bytea *OidSendFunctionCall(Oid functionId, Datum val);
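For example, a sketch (an assumption, not from the patch) of converting a C string into a Datum of a known type via its input function, using the convenience wrapper declared above; getTypeInputInfo() is the usual lsyscache lookup for the input function and typioparam.

#include "postgres.h"
#include "fmgr.h"
#include "utils/lsyscache.h"

static Datum
string_to_datum(const char *str, Oid typid, int32 typmod)
{
	Oid			typinput;
	Oid			typioparam;

	getTypeInputInfo(typid, &typinput, &typioparam);
	return OidInputFunctionCall(typinput, (char *) str, typioparam, typmod);
}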
*
* Copyright (c) 2002-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.23 2006/03/05 15:58:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.24 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *argmodes,
Node *call_expr);
-extern int get_func_arg_info(HeapTuple procTup,
- Oid **p_argtypes, char ***p_argnames,
- char **p_argmodes);
+extern int get_func_arg_info(HeapTuple procTup,
+ Oid **p_argtypes, char ***p_argnames,
+ char **p_argmodes);
extern char *get_func_result_name(Oid functionId);
*
* Copyright (c) 2003-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/getaddrinfo.h,v 1.21 2006/07/06 02:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/getaddrinfo.h,v 1.22 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#else /* WIN32 */
#ifdef WIN32_ONLY_COMPILER
#ifndef WSA_NOT_ENOUGH_MEMORY
-#define WSA_NOT_ENOUGH_MEMORY (WSAENOBUFS)
+#define WSA_NOT_ENOUGH_MEMORY (WSAENOBUFS)
#endif
-#define WSATYPE_NOT_FOUND (WSABASEERR+109)
+#define WSATYPE_NOT_FOUND (WSABASEERR+109)
#endif
#define EAI_AGAIN WSATRY_AGAIN
#define EAI_BADFLAGS WSAEINVAL
* Interface to hba.c
*
*
- * $PostgreSQL: pgsql/src/include/libpq/hba.h,v 1.43 2006/07/10 16:20:52 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/hba.h,v 1.44 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
,uaPAM
#endif /* USE_PAM */
#ifdef USE_LDAP
- ,uaLDAP
+ ,uaLDAP
#endif
} UserAuth;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.56 2006/06/20 22:52:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.57 2006/10/04 00:30:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* but since it gets used by elog.c in the same way as database_name and
* other members of this struct, we may as well keep it here.
*/
- TimestampTz SessionStartTime; /* backend start time */
- time_t session_start; /* same, in time_t format */
+ TimestampTz SessionStartTime; /* backend start time */
+ time_t session_start; /* same, in time_t format */
/*
* TCP keepalive settings.
-/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.68 2006/05/21 20:05:21 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.69 2006/10/04 00:30:09 momjian Exp $ */
#ifndef PG_WCHAR_H
#define PG_WCHAR_H
#define LC_JISX0208_1978 0x90 /* Japanese Kanji, old JIS (not supported) */
/* #define FREE 0x90 free (unused) */
#define LC_GB2312_80 0x91 /* Chinese */
-#define LC_JISX0208 0x92 /* Japanese Kanji (JIS X 0208) */
-#define LC_KS5601 0x93 /* Korean */
-#define LC_JISX0212 0x94 /* Japanese Kanji (JIS X 0212) */
+#define LC_JISX0208 0x92 /* Japanese Kanji (JIS X 0208) */
+#define LC_KS5601 0x93 /* Korean */
+#define LC_JISX0212 0x94 /* Japanese Kanji (JIS X 0212) */
#define LC_CNS11643_1 0x95 /* CNS 11643-1992 Plane 1 */
#define LC_CNS11643_2 0x96 /* CNS 11643-1992 Plane 2 */
/* #define FREE 0x97 free (unused) */
-#define LC_BIG5_1 0x98 /* Plane 1 Chinese traditional (not supported) */
-#define LC_BIG5_2 0x99 /* Plane 1 Chinese traditional (not supported) */
+#define LC_BIG5_1 0x98 /* Plane 1 Chinese traditional (not supported) */
+#define LC_BIG5_2 0x99 /* Plane 1 Chinese traditional (not supported) */
/*
* Private single byte encodings (0xa0-0xef)
*/
-#define LC_SISHENG 0xa0 /* Chinese SiSheng characters for
+#define LC_SISHENG 0xa0/* Chinese SiSheng characters for
* PinYin/ZhuYin (not supported) */
-#define LC_IPA 0xa1 /* IPA (International Phonetic Association)
+#define LC_IPA 0xa1/* IPA (International Phonetic Association)
* (not supported) */
-#define LC_VISCII_LOWER 0xa2 /* Vietnamese VISCII1.1 lower-case (not
+#define LC_VISCII_LOWER 0xa2/* Vietnamese VISCII1.1 lower-case (not
* supported) */
-#define LC_VISCII_UPPER 0xa3 /* Vietnamese VISCII1.1 upper-case (not
+#define LC_VISCII_UPPER 0xa3/* Vietnamese VISCII1.1 upper-case (not
* supported) */
-#define LC_ARABIC_DIGIT 0xa4 /* Arabic digit (not supported) */
+#define LC_ARABIC_DIGIT 0xa4 /* Arabic digit (not supported) */
#define LC_ARABIC_1_COLUMN 0xa5 /* Arabic 1-column (not supported) */
#define LC_ASCII_RIGHT_TO_LEFT 0xa6 /* ASCII (left half of ISO8859-1) with
* right-to-left direction (not
* supported) */
-#define LC_LAO 0xa7 /* Lao characters (ISO10646 0E80..0EDF) (not
+#define LC_LAO 0xa7/* Lao characters (ISO10646 0E80..0EDF) (not
* supported) */
#define LC_ARABIC_2_COLUMN 0xa8 /* Arabic 1-column (not supported) */
#define LC_INDIAN_1_COLUMN 0xf0/* Indian charset for 1-column width glypps
* (not supported) */
#define LC_TIBETAN_1_COLUMN 0xf1 /* Tibetan 1 column glyph (not supported) */
-#define LC_ETHIOPIC 0xf5 /* Ethiopic characters (not supported) */
+#define LC_ETHIOPIC 0xf5 /* Ethiopic characters (not supported) */
#define LC_CNS11643_3 0xf6 /* CNS 11643-1992 Plane 3 */
#define LC_CNS11643_4 0xf7 /* CNS 11643-1992 Plane 4 */
#define LC_CNS11643_5 0xf8 /* CNS 11643-1992 Plane 5 */
#define LC_CNS11643_7 0xfa /* CNS 11643-1992 Plane 7 */
#define LC_INDIAN_2_COLUMN 0xfb/* Indian charset for 2-column width glypps
* (not supported) */
-#define LC_TIBETAN 0xfc /* Tibetan (not supported) */
+#define LC_TIBETAN 0xfc /* Tibetan (not supported) */
/* #define FREE 0xfd free (unused) */
/* #define FREE 0xfe free (unused) */
/* #define FREE 0xff free (unused) */
PG_WIN1255, /* windows-1255 */
PG_WIN1257, /* windows-1257 */
/* PG_ENCODING_BE_LAST points to the above entry */
-
+
/* followings are for client encoding only */
PG_SJIS, /* Shift JIS (Winindows-932) */
PG_BIG5, /* Big5 (Windows-950) */
extern bool pg_verifymbstr(const char *mbstr, int len, bool noError);
extern bool pg_verify_mbstr(int encoding, const char *mbstr, int len,
- bool noError);
+ bool noError);
extern void report_invalid_encoding(int encoding, const char *mbstr, int len);
extern void report_untranslatable_char(int src_encoding, int dest_encoding,
- const char *mbstr, int len);
+ const char *mbstr, int len);
extern void pg_ascii2mic(const unsigned char *l, unsigned char *p, int len);
extern void pg_mic2ascii(const unsigned char *mic, unsigned char *p, int len);
extern void latin2mic(const unsigned char *l, unsigned char *p, int len,
- int lc, int encoding);
+ int lc, int encoding);
extern void mic2latin(const unsigned char *mic, unsigned char *p, int len,
- int lc, int encoding);
+ int lc, int encoding);
extern void latin2mic_with_table(const unsigned char *l, unsigned char *p,
- int len, int lc, int encoding,
- const unsigned char *tab);
+ int len, int lc, int encoding,
+ const unsigned char *tab);
extern void mic2latin_with_table(const unsigned char *mic, unsigned char *p,
- int len, int lc, int encoding,
- const unsigned char *tab);
+ int len, int lc, int encoding,
+ const unsigned char *tab);
extern bool pg_utf8_islegal(const unsigned char *source, int length);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.32 2006/09/06 20:40:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.33 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ----------------
*/
-#define PARAM_FLAG_CONST 0x0001 /* parameter is constant */
+#define PARAM_FLAG_CONST 0x0001 /* parameter is constant */
typedef struct ParamExternData
{
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.330 2006/09/05 21:08:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.331 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define ACL_USAGE (1<<8) /* for languages and namespaces */
#define ACL_CREATE (1<<9) /* for namespaces and databases */
#define ACL_CREATE_TEMP (1<<10) /* for databases */
-#define ACL_CONNECT (1<<11) /* for databases */
+#define ACL_CONNECT (1<<11) /* for databases */
#define N_ACL_RIGHTS 12 /* 1 plus the last 1<<x */

#define ACL_NO_RIGHTS 0
/* Currently, SELECT ... FOR UPDATE/FOR SHARE requires UPDATE privileges */
* INSERT/UPDATE/DELETE; 0 for SELECT */
RangeVar *into; /* target relation for SELECT INTO */
- List *intoOptions; /* options from WITH clause */
+ List *intoOptions; /* options from WITH clause */
OnCommitAction intoOnCommit; /* what do we do at COMMIT? */
- char *intoTableSpaceName; /* table space to use, or NULL */
+ char *intoTableSpaceName; /* table space to use, or NULL */
bool hasAggs; /* has aggregates in tlist or havingQual */
bool hasSubLinks; /* has subquery SubLink */
List *resultRelations; /* integer list of RT indexes, or NIL */
/*
- * If the query has a returningList then the planner will store a list
- * of processed targetlists (one per result relation) here. We must
- * have a separate RETURNING targetlist for each result rel because
- * column numbers may vary within an inheritance tree. In the targetlists,
- * Vars referencing the result relation will have their original varno
- * and varattno, while Vars referencing other rels will be converted
- * to have varno OUTER and varattno referencing a resjunk entry in the
- * top plan node's targetlist. XXX This field ought to go in some sort of
- * TopPlan plan node, not in the Query.
+ * If the query has a returningList then the planner will store a list of
+ * processed targetlists (one per result relation) here. We must have a
+ * separate RETURNING targetlist for each result rel because column
+ * numbers may vary within an inheritance tree. In the targetlists, Vars
+ * referencing the result relation will have their original varno and
+ * varattno, while Vars referencing other rels will be converted to have
+ * varno OUTER and varattno referencing a resjunk entry in the top plan
+ * node's targetlist. XXX This field ought to go in some sort of TopPlan
+ * plan node, not in the Query.
*/
- List *returningLists; /* list of lists of TargetEntry, or NIL */
+ List *returningLists; /* list of lists of TargetEntry, or NIL */
} Query;
NodeTag type;
Node *funccallnode; /* untransformed function call tree */
Alias *alias; /* table alias & optional column aliases */
- List *coldeflist; /* list of ColumnDef nodes to describe
- * result of function returning RECORD */
+ List *coldeflist; /* list of ColumnDef nodes to describe result
+ * of function returning RECORD */
} RangeFunction;
/*
{
NodeTag type;
RangeVar *relation;
- List *options;
+ List *options;
} InhRelation;
/*
*/
Node *funcexpr; /* expression tree for func call */
List *funccoltypes; /* OID list of column type OIDs */
- List *funccoltypmods; /* integer list of column typmods */
+ List *funccoltypmods; /* integer list of column typmods */
/*
* Fields valid for a values RTE (else NIL):
/*
* These fields are used only in "leaf" SelectStmts.
*
- * into, intoColNames, intoOptions, intoOnCommit, and
- * intoTableSpaceName are a kluge; they belong somewhere else...
+ * into, intoColNames, intoOptions, intoOnCommit, and intoTableSpaceName
+ * are a kluge; they belong somewhere else...
*/
List *distinctClause; /* NULL, list of DISTINCT ON exprs, or
* lcons(NIL,NIL) for all (SELECT DISTINCT) */
RangeVar *into; /* target table (for select into table) */
List *intoColNames; /* column names for into table */
List *intoOptions; /* options from WITH clause */
- OnCommitAction intoOnCommit; /* what do we do at COMMIT? */
+ OnCommitAction intoOnCommit; /* what do we do at COMMIT? */
char *intoTableSpaceName; /* table space to use, or NULL */
List *targetList; /* the target list (of ResTarget) */
List *fromClause; /* the FROM clause */
/*
* In a "leaf" node representing a VALUES list, the above fields are all
- * null, and instead this field is set. Note that the elements of
- * the sublists are just expressions, without ResTarget decoration.
- * Also note that a list element can be DEFAULT (represented as a
- * SetToDefault node), regardless of the context of the VALUES list.
- * It's up to parse analysis to reject that where not valid.
+ * null, and instead this field is set. Note that the elements of the
+ * sublists are just expressions, without ResTarget decoration. Also note
+ * that a list element can be DEFAULT (represented as a SetToDefault
+ * node), regardless of the context of the VALUES list. It's up to parse
+ * analysis to reject that where not valid.
*/
List *valuesLists; /* untransformed list of expression lists */
* Copy Statement
*
* We support "COPY relation FROM file", "COPY relation TO file", and
- * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
+ * "COPY (query) TO file". In any given CopyStmt, exactly one of "relation"
* and "query" must be non-NULL. Note: "query" is a SelectStmt before
* parse analysis, and a Query afterwards.
* ----------------------
char *tablespacename; /* table space to use, or NULL */
} CreateStmt;
-typedef enum CreateStmtLikeOption {
+typedef enum CreateStmtLikeOption
+{
CREATE_TABLE_LIKE_INCLUDING_DEFAULTS,
CREATE_TABLE_LIKE_EXCLUDING_DEFAULTS,
CREATE_TABLE_LIKE_INCLUDING_CONSTRAINTS,
typedef struct ExecuteStmt
{
- NodeTag type;
- char *name; /* The name of the plan to execute */
- RangeVar *into; /* Optional table to store results in */
- List *intoOptions; /* Options from WITH clause */
- OnCommitAction into_on_commit; /* What do we do at COMMIT? */
- char *into_tbl_space; /* Tablespace to use, or NULL */
- List *params; /* Values to assign to parameters */
+ NodeTag type;
+ char *name; /* The name of the plan to execute */
+ RangeVar *into; /* Optional table to store results in */
+ List *intoOptions; /* Options from WITH clause */
+ OnCommitAction into_on_commit; /* What do we do at COMMIT? */
+ char *into_tbl_space; /* Tablespace to use, or NULL */
+ List *params; /* Values to assign to parameters */
} ExecuteStmt;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.116 2006/09/28 20:51:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.117 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* For historical reasons, such parameters are numbered from 0.
* These numbers are independent of PARAM_EXTERN numbers.
*
- * PARAM_SUBLINK: The parameter represents an output column of a SubLink
+ * PARAM_SUBLINK: The parameter represents an output column of a SubLink
* node's sub-select. The column number is contained in the
* `paramid' field. (This type of Param is converted to
* PARAM_EXEC during planning.)
*
* NOTE: in the raw output of gram.y, testexpr contains just the raw form
* of the lefthand expression (if any), and operName is the String name of
- * the combining operator. Also, subselect is a raw parsetree. During parse
+ * the combining operator. Also, subselect is a raw parsetree. During parse
* analysis, the parser transforms testexpr into a complete boolean expression
* that compares the lefthand value(s) to PARAM_SUBLINK nodes representing the
* output columns of the subselect. And subselect is transformed to a Query.
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.127 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.128 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* This struct is conventionally called "root" in all the planner routines.
* It holds links to all of the planner's working state, in addition to the
- * original Query. Note that at present the planner extensively modifies
+ * original Query. Note that at present the planner extensively modifies
* the passed-in Query data structure; someday that should stop.
*----------
*/
* does not correspond to a base relation, such as a join RTE or an
* unreferenced view RTE; or if the RelOptInfo hasn't been made yet.
*/
- struct RelOptInfo **simple_rel_array; /* All 1-relation RelOptInfos */
- int simple_rel_array_size; /* allocated size of array */
+ struct RelOptInfo **simple_rel_array; /* All 1-relation RelOptInfos */
+ int simple_rel_array_size; /* allocated size of array */
/*
* join_rel_list is a list of all join-relation RelOptInfos we have
List *in_info_list; /* list of InClauseInfos */
- List *append_rel_list; /* list of AppendRelInfos */
+ List *append_rel_list; /* list of AppendRelInfos */
List *query_pathkeys; /* desired pathkeys for query_planner(), and
* actual pathkeys afterwards */
List *group_pathkeys; /* groupClause pathkeys, if any */
List *sort_pathkeys; /* sortClause pathkeys, if any */
- double total_table_pages; /* # of pages in all tables of query */
+ double total_table_pages; /* # of pages in all tables of query */
double tuple_fraction; /* tuple_fraction passed to query_planner */
bool hasJoinRTEs; /* true if any RTEs are RTE_JOIN kind */
bool hasOuterJoins; /* true if any RTEs are outer joins */
bool hasHavingQual; /* true if havingQual was non-null */
- bool hasPseudoConstantQuals; /* true if any RestrictInfo has
+ bool hasPseudoConstantQuals; /* true if any RestrictInfo has
* pseudoconstant = true */
} PlannerInfo;
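A sketch of how planner code typically walks the simple_rel_array described above (illustrative only; the loop body is an assumption):

#include "postgres.h"
#include "nodes/relation.h"

static void
visit_base_rels(PlannerInfo *root)
{
	int			rti;

	/* simple_rel_array is indexed by rangetable index, starting at 1 */
	for (rti = 1; rti < root->simple_rel_array_size; rti++)
	{
		RelOptInfo *rel = root->simple_rel_array[rti];

		if (rel == NULL)
			continue;			/* join RTE, unreferenced view, etc. */
		if (rel->reloptkind != RELOPT_BASEREL)
			continue;			/* skip "other" member rels */
		/* ... examine rel->rows, rel->baserestrictinfo, ... */
	}
}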
* Currently the only kind of otherrels are those made for member relations
* of an "append relation", that is an inheritance set or UNION ALL subquery.
* An append relation has a parent RTE that is a base rel, which represents
- * the entire append relation. The member RTEs are otherrels. The parent
+ * the entire append relation. The member RTEs are otherrels. The parent
* is present in the query join tree but the members are not. The member
* RTEs and otherrels are used to plan the scans of the individual tables or
* subqueries of the append set; then the parent baserel is given an Append
*
* The pseudoconstant flag is set true if the clause contains no Vars of
* the current query level and no volatile functions. Such a clause can be
- * pulled out and used as a one-time qual in a gating Result node. We keep
+ * pulled out and used as a one-time qual in a gating Result node. We keep
* pseudoconstant clauses in the same lists as other RestrictInfos so that
* the regular clause-pushing machinery can assign them to the correct join
* level, but they need to be treated specially for cost and selectivity
bool can_join; /* see comment above */
- bool pseudoconstant; /* see comment above */
+ bool pseudoconstant; /* see comment above */
/* The set of relids (varnos) actually referenced in the clause: */
Relids clause_relids;
* Outer join info.
*
* One-sided outer joins constrain the order of joining partially but not
- * completely. We flatten such joins into the planner's top-level list of
+ * completely. We flatten such joins into the planner's top-level list of
* relations to join, but record information about each outer join in an
* OuterJoinInfo struct. These structs are kept in the PlannerInfo node's
* oj_info_list.
typedef struct AppendRelInfo
{
NodeTag type;
+
/*
- * These fields uniquely identify this append relationship. There
- * can be (in fact, always should be) multiple AppendRelInfos for the
- * same parent_relid, but never more than one per child_relid, since
- * a given RTE cannot be a child of more than one append parent.
+ * These fields uniquely identify this append relationship. There can be
+ * (in fact, always should be) multiple AppendRelInfos for the same
+ * parent_relid, but never more than one per child_relid, since a given
+ * RTE cannot be a child of more than one append parent.
*/
Index parent_relid; /* RT index of append parent rel */
Index child_relid; /* RT index of append child rel */
+
/*
* For an inheritance appendrel, the parent and child are both regular
* relations, and we store their rowtype OIDs here for use in translating
- * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
+ * whole-row Vars. For a UNION-ALL appendrel, the parent and child are
* both subqueries with no named rowtype, and we store InvalidOid here.
*/
- Oid parent_reltype; /* OID of parent's composite type */
+ Oid parent_reltype; /* OID of parent's composite type */
Oid child_reltype; /* OID of child's composite type */
/*
- * The N'th element of this list is the integer column number of
- * the child column corresponding to the N'th column of the parent.
- * A list element is zero if it corresponds to a dropped column of the
- * parent (this is only possible for inheritance cases, not UNION ALL).
+ * The N'th element of this list is the integer column number of the child
+ * column corresponding to the N'th column of the parent. A list element
+ * is zero if it corresponds to a dropped column of the parent (this is
+ * only possible for inheritance cases, not UNION ALL).
*/
List *col_mappings; /* list of child attribute numbers */
/*
- * The N'th element of this list is a Var or expression representing
- * the child column corresponding to the N'th column of the parent.
- * This is used to translate Vars referencing the parent rel into
- * references to the child. A list element is NULL if it corresponds
- * to a dropped column of the parent (this is only possible for
- * inheritance cases, not UNION ALL).
+ * The N'th element of this list is a Var or expression representing the
+ * child column corresponding to the N'th column of the parent. This is
+ * used to translate Vars referencing the parent rel into references to
+ * the child. A list element is NULL if it corresponds to a dropped
+ * column of the parent (this is only possible for inheritance cases, not
+ * UNION ALL).
*
* This might seem redundant with the col_mappings data, but it is handy
- * because flattening of sub-SELECTs that are members of a UNION ALL
- * will cause changes in the expressions that need to be substituted
- * for a parent Var. Adjusting this data structure lets us track what
- * really needs to be substituted.
+ * because flattening of sub-SELECTs that are members of a UNION ALL will
+ * cause changes in the expressions that need to be substituted for a
+ * parent Var. Adjusting this data structure lets us track what really
+ * needs to be substituted.
*
* Notice we only store entries for user columns (attno > 0). Whole-row
- * Vars are special-cased, and system columns (attno < 0) need no
- * special translation since their attnos are the same for all tables.
+ * Vars are special-cased, and system columns (attno < 0) need no special
+ * translation since their attnos are the same for all tables.
*
- * Caution: the Vars have varlevelsup = 0. Be careful to adjust
- * as needed when copying into a subquery.
+ * Caution: the Vars have varlevelsup = 0. Be careful to adjust as needed
+ * when copying into a subquery.
*/
- List *translated_vars; /* Expressions in the child's Vars */
+ List *translated_vars; /* Expressions in the child's Vars */
+
/*
- * We store the parent table's OID here for inheritance, or InvalidOid
- * for UNION ALL. This is only needed to help in generating error
- * messages if an attempt is made to reference a dropped parent column.
+ * We store the parent table's OID here for inheritance, or InvalidOid for
+ * UNION ALL. This is only needed to help in generating error messages if
+ * an attempt is made to reference a dropped parent column.
*/
Oid parent_reloid; /* OID of parent relation */
} AppendRelInfo;
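For illustration, a sketch of the translation step that the col_mappings/translated_vars comments describe, assuming the caller already holds the AppendRelInfo for one parent/child pair; adjust_appendrel_attrs() is the prep.c routine declared later in this patch.

#include "postgres.h"
#include "nodes/relation.h"
#include "optimizer/prep.h"

/* Rewrite an expression over the append parent into the child's terms. */
static Node *
translate_to_child(Node *parent_expr, AppendRelInfo *appinfo)
{
	/* Vars with varno == appinfo->parent_relid are replaced using
	 * appinfo->translated_vars; whole-row Vars get special handling. */
	return adjust_appendrel_attrs(parent_expr, appinfo);
}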
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/cost.h,v 1.81 2006/09/25 22:12:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/cost.h,v 1.82 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define DEFAULT_CPU_INDEX_TUPLE_COST 0.005
#define DEFAULT_CPU_OPERATOR_COST 0.0025
-#define DEFAULT_EFFECTIVE_CACHE_SIZE 16384 /* measured in pages */
+#define DEFAULT_EFFECTIVE_CACHE_SIZE 16384 /* measured in pages */
/*
extern double clamp_row_est(double nrows);
extern double index_pages_fetched(double tuples_fetched, BlockNumber pages,
- double index_pages, PlannerInfo *root);
+ double index_pages, PlannerInfo *root);
extern void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel);
extern void cost_index(IndexPath *path, PlannerInfo *root, IndexOptInfo *index,
List *indexQuals, RelOptInfo *outer_rel);
extern void cost_functionscan(Path *path, PlannerInfo *root,
RelOptInfo *baserel);
extern void cost_valuesscan(Path *path, PlannerInfo *root,
- RelOptInfo *baserel);
+ RelOptInfo *baserel);
extern void cost_sort(Path *path, PlannerInfo *root,
List *pathkeys, Cost input_cost, double tuples, int width);
extern void cost_material(Path *path,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.71 2006/08/02 01:59:48 joe Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/pathnode.h,v 1.72 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes for relnode.c
*/
extern RelOptInfo *build_simple_rel(PlannerInfo *root, int relid,
- RelOptKind reloptkind);
+ RelOptKind reloptkind);
extern RelOptInfo *find_base_rel(PlannerInfo *root, int relid);
extern RelOptInfo *find_join_rel(PlannerInfo *root, Relids relids);
extern RelOptInfo *build_join_rel(PlannerInfo *root,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/plancat.h,v 1.41 2006/09/19 22:49:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/plancat.h,v 1.42 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void get_relation_info(PlannerInfo *root, Oid relationObjectId,
- bool inhparent, RelOptInfo *rel);
+ bool inhparent, RelOptInfo *rel);
extern bool relation_excluded_by_constraints(RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
extern List *build_physical_tlist(PlannerInfo *root, RelOptInfo *rel);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/prep.h,v 1.56 2006/03/05 15:58:57 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/prep.h,v 1.57 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Node *adjust_appendrel_attrs(Node *node, AppendRelInfo *appinfo);
extern Relids *adjust_appendrel_attr_needed(RelOptInfo *oldrel,
- AppendRelInfo *appinfo,
- AttrNumber new_min_attr,
- AttrNumber new_max_attr);
+ AppendRelInfo *appinfo,
+ AttrNumber new_min_attr,
+ AttrNumber new_max_attr);
#endif /* PREP_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.37 2006/07/01 18:38:33 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.38 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool restriction_is_or_clause(RestrictInfo *restrictinfo);
extern List *get_actual_clauses(List *restrictinfo_list);
extern List *extract_actual_clauses(List *restrictinfo_list,
- bool pseudoconstant);
+ bool pseudoconstant);
extern void extract_actual_join_clauses(List *restrictinfo_list,
List **joinquals,
List **otherquals);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/analyze.h,v 1.33 2006/04/30 18:30:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/analyze.h,v 1.34 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern List *parse_analyze(Node *parseTree, const char *sourceText,
- Oid *paramTypes, int numParams);
+ Oid *paramTypes, int numParams);
extern List *parse_analyze_varparams(Node *parseTree, const char *sourceText,
- Oid **paramTypes, int *numParams);
+ Oid **paramTypes, int *numParams);
extern List *parse_sub_analyze(Node *parseTree, ParseState *parentParseState);
extern List *analyzeCreateSchemaStmt(CreateSchemaStmt *stmt);
extern void CheckSelectLocking(Query *qry);
extern void applyLockingClause(Query *qry, Index rtindex,
- bool forUpdate, bool noWait);
+ bool forUpdate, bool noWait);
#endif /* ANALYZE_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_coerce.h,v 1.65 2006/07/26 19:31:51 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_coerce.h,v 1.66 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Node *coerce_to_integer(ParseState *pstate, Node *node,
const char *constructName);
extern Node *coerce_to_bigint(ParseState *pstate, Node *node,
- const char *constructName);
+ const char *constructName);
extern Oid select_common_type(List *typeids, const char *context);
extern Node *coerce_to_common_type(ParseState *pstate, Node *node,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_func.h,v 1.56 2006/04/15 17:45:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_func.h,v 1.57 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Oid LookupFuncNameTypeNames(List *funcname, List *argtypes,
bool noError);
extern Oid LookupAggNameTypeNames(List *aggname, List *argtypes,
- bool noError);
+ bool noError);
#endif /* PARSE_FUNC_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.49 2006/04/30 18:30:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.50 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* links to current parse state of outer query.
*
* p_sourcetext: source string that generated the raw parsetree being
- * analyzed, or NULL if not available. (The string is used only to
+ * analyzed, or NULL if not available. (The string is used only to
* generate cursor positions in error messages: we need it to convert
* byte-wise locations in parse structures to character-wise cursor
* positions.)
Oid *p_paramtypes; /* OIDs of types for $n parameter symbols */
int p_numparams; /* allocated size of p_paramtypes[] */
int p_next_resno; /* next targetlist resno to assign */
- List *p_locking_clause; /* raw FOR UPDATE/FOR SHARE info */
- Node *p_value_substitute; /* what to replace VALUE with, if any */
+ List *p_locking_clause; /* raw FOR UPDATE/FOR SHARE info */
+ Node *p_value_substitute; /* what to replace VALUE with, if any */
bool p_variableparams;
bool p_hasAggs;
bool p_hasSubLinks;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_oper.h,v 1.39 2006/03/14 22:48:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_oper.h,v 1.40 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Routines to look up an operator given name and exact input type(s) */
extern Oid LookupOperName(ParseState *pstate, List *opername,
- Oid oprleft, Oid oprright,
- bool noError, int location);
+ Oid oprleft, Oid oprright,
+ bool noError, int location);
extern Oid LookupOperNameTypeNames(ParseState *pstate, List *opername,
- TypeName *oprleft, TypeName *oprright,
- bool noError, int location);
+ TypeName *oprleft, TypeName *oprright,
+ bool noError, int location);
/* Routines to find operators matching a name and given input types */
/* NB: the selected operator may require coercion of the input types! */
extern Operator oper(ParseState *pstate, List *op, Oid arg1, Oid arg2,
- bool noError, int location);
+ bool noError, int location);
extern Operator right_oper(ParseState *pstate, List *op, Oid arg,
- bool noError, int location);
+ bool noError, int location);
extern Operator left_oper(ParseState *pstate, List *op, Oid arg,
- bool noError, int location);
+ bool noError, int location);
/* Routines to find operators that DO NOT require coercion --- ie, their */
/* input types are either exactly as given, or binary-compatible */
extern Operator compatible_oper(ParseState *pstate, List *op,
- Oid arg1, Oid arg2,
- bool noError, int location);
+ Oid arg1, Oid arg2,
+ bool noError, int location);
/* currently no need for compatible_left_oper/compatible_right_oper */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_relation.h,v 1.54 2006/08/02 01:59:48 joe Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_relation.h,v 1.55 2006/10/04 00:30:09 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Node *scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte,
char *colname, int location);
extern Node *colNameToVar(ParseState *pstate, char *colname, bool localonly,
- int location);
+ int location);
extern Node *qualifiedNameToVar(ParseState *pstate,
char *schemaname,
char *refname,
bool addToJoinList,
bool addToRelNameSpace, bool addToVarNameSpace);
extern RangeTblEntry *addImplicitRTE(ParseState *pstate, RangeVar *relation,
- int location);
+ int location);
extern void expandRTE(RangeTblEntry *rte, int rtindex, int sublevels_up,
bool include_dropped,
List **colnames, List **colvars);
*
* Copyright (c) 2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/pg_trace.h,v 1.1 2006/07/24 16:32:45 petere Exp $
+ * $PostgreSQL: pgsql/src/include/pg_trace.h,v 1.2 2006/10/04 00:30:06 momjian Exp $
* ----------
*/
*
* Only one DTrace provider called "postgresql" will be used for PostgreSQL,
* so the name is hard-coded here to avoid having to specify it in the
- * source code.
+ * source code.
*/
#define PG_TRACE(name) \
DTRACE_PROBE4(postgresql, name, arg1, arg2, arg3, arg4)
#define PG_TRACE5(name, arg1, arg2, arg3, arg4, arg5) \
DTRACE_PROBE5(postgresql, name, arg1, arg2, arg3, arg4, arg5)
-
-#else /* not ENABLE_DTRACE */
+#else /* not ENABLE_DTRACE */
/*
* Unless DTrace is explicitly enabled with --enable-dtrace, the PG_TRACE
#define PG_TRACE3(name, arg1, arg2, arg3)
#define PG_TRACE4(name, arg1, arg2, arg3, arg4)
#define PG_TRACE5(name, arg1, arg2, arg3, arg4, arg5)
+#endif /* not ENABLE_DTRACE */
-#endif /* not ENABLE_DTRACE */
-
-#endif /* PG_TRACE_H */
+#endif /* PG_TRACE_H */
*
* Copyright (c) 2001-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.49 2006/08/19 01:36:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/pgstat.h,v 1.50 2006/10/04 00:30:06 momjian Exp $
* ----------
*/
#ifndef PGSTAT_H
PgStat_MsgHdr m_hdr;
Oid m_databaseid;
Oid m_tableoid;
- bool m_autovacuum;
- TimestampTz m_analyzetime;
+ bool m_autovacuum;
+ TimestampTz m_analyzetime;
PgStat_Counter m_live_tuples;
PgStat_Counter m_dead_tuples;
} PgStat_MsgAnalyze;
PgStat_Counter blocks_fetched;
PgStat_Counter blocks_hit;
- TimestampTz vacuum_timestamp; /* user initiated vacuum */
- TimestampTz autovac_vacuum_timestamp; /* autovacuum initiated */
- TimestampTz analyze_timestamp; /* user initiated */
- TimestampTz autovac_analyze_timestamp; /* autovacuum initiated */
+ TimestampTz vacuum_timestamp; /* user initiated vacuum */
+ TimestampTz autovac_vacuum_timestamp; /* autovacuum initiated */
+ TimestampTz analyze_timestamp; /* user initiated */
+ TimestampTz autovac_analyze_timestamp; /* autovacuum initiated */
} PgStat_StatTabEntry;
/*
* To avoid locking overhead, we use the following protocol: a backend
* increments st_changecount before modifying its entry, and again after
- * finishing a modification. A would-be reader should note the value
- * of st_changecount, copy the entry into private memory, then check
+ * finishing a modification. A would-be reader should note the value of
+ * st_changecount, copy the entry into private memory, then check
* st_changecount again. If the value hasn't changed, and if it's even,
* the copy is valid; otherwise start over. This makes updates cheap
* while reads are potentially expensive, but that's the tradeoff we want.
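A sketch of the reader side of that protocol (an illustration, not code from the patch), copying a per-backend status entry until an even, unchanged st_changecount is observed:

#include "postgres.h"
#include "pgstat.h"

static void
read_backend_entry(volatile PgBackendStatus *beentry, PgBackendStatus *out)
{
	for (;;)
	{
		int			before = beentry->st_changecount;

		memcpy(out, (PgBackendStatus *) beentry, sizeof(PgBackendStatus));

		/* Valid only if the count is even and did not move while copying */
		if (before == beentry->st_changecount && (before & 1) == 0)
			break;
		/* otherwise a writer was active; retry */
	}
}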
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/port.h,v 1.103 2006/10/03 22:18:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/port.h,v 1.104 2006/10/04 00:30:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Generated using Win32 "CMD /?":
*
- * 1. If all of the following conditions are met, then quote characters
- * on the command line are preserved:
+ * 1. If all of the following conditions are met, then quote characters
+ * on the command line are preserved:
*
- * - no /S switch
- * - exactly two quote characters
- * - no special characters between the two quote characters, where special
- * is one of: &<>()@^|
- * - there are one or more whitespace characters between the the two quote
- * characters
- * - the string between the two quote characters is the name of an
- * executable file.
+ * - no /S switch
+ * - exactly two quote characters
+ * - no special characters between the two quote characters, where special
+ * is one of: &<>()@^|
+ * - there are one or more whitespace characters between the the two quote
+ * characters
+ * - the string between the two quote characters is the name of an
+ * executable file.
*
- * 2. Otherwise, old behavior is to see if the first character is a quote
- * character and if so, strip the leading character and remove the last
- * quote character on the command line, preserving any text after the last
- * quote character.
+ * 2. Otherwise, old behavior is to see if the first character is a quote
+ * character and if so, strip the leading character and remove the last
+ * quote character on the command line, preserving any text after the last
+ * quote character.
*/
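Those rules are why callers wrap an entire command line in one extra pair of quotes (SYSTEMQUOTE, defined just below) before handing it to system(): cmd.exe strips the outer pair under rule 2, leaving the embedded quoted paths intact. A sketch, with exec_path and datadir as assumed arguments:

#include "postgres.h"

static void
run_quoted_command(const char *exec_path, const char *datadir)
{
	char		cmd[MAXPGPATH * 2];

	/* The outer SYSTEMQUOTE pair is what cmd.exe removes (rule 2 above) */
	snprintf(cmd, sizeof(cmd),
			 SYSTEMQUOTE "\"%s\" -D \"%s\"" SYSTEMQUOTE,
			 exec_path, datadir);

	if (system(cmd) != 0)
		elog(ERROR, "command \"%s\" failed", cmd);
}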
#if defined(WIN32) && !defined(__CYGWIN__)
#define SYSTEMQUOTE "\""
/*
* Versions of libintl >= 0.13 try to replace printf() and friends with
- * macros to their own versions that understand the %$ format. We do the
+ * macros to their own versions that understand the %$ format. We do the
* same, so disable their macros, if they exist.
*/
#ifdef vsnprintf
#define fprintf pg_fprintf
#define printf pg_printf
#endif
-
-#endif /* USE_REPL_SNPRINTF */
+#endif /* USE_REPL_SNPRINTF */
/* Portable prompt handling */
extern char *simple_prompt(const char *prompt, int maxlen, bool echo);
extern FILE *pgwin32_fopen(const char *, const char *);
#ifndef FRONTEND
-#define open(a,b,c) pgwin32_open(a,b,c)
+#define open(a,b,c) pgwin32_open(a,b,c)
#define fopen(a,b) pgwin32_fopen(a,b)
#endif
typedef int (*qsort_arg_comparator) (const void *a, const void *b, void *arg);
extern void qsort_arg(void *base, size_t nel, size_t elsize,
- qsort_arg_comparator cmp, void *arg);
+ qsort_arg_comparator cmp, void *arg);
-#endif /* PG_PORT_H */
+#endif /* PG_PORT_H */
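For example, a sketch of a comparator using qsort_arg's pass-through argument declared just above (the direction flag is an assumption for illustration):

#include "postgres.h"

/* Compare two ints; *arg is +1 for ascending or -1 for descending order. */
static int
cmp_int_dir(const void *a, const void *b, void *arg)
{
	int			av = *(const int *) a;
	int			bv = *(const int *) b;
	int			dir = *(int *) arg;

	if (av == bv)
		return 0;
	return (av < bv) ? -dir : dir;
}

static void
sort_descending(int *values, int nvalues)
{
	int			dir = -1;

	qsort_arg(values, nvalues, sizeof(int), cmp_int_dir, &dir);
}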
-/* $PostgreSQL: pgsql/src/include/port/darwin.h,v 1.10 2006/03/11 04:38:38 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/darwin.h,v 1.11 2006/10/04 00:30:09 momjian Exp $ */
#define __darwin__ 1
#if HAVE_DECL_F_FULLFSYNC /* not present before OS X 10.3 */
#define HAVE_FSYNC_WRITETHROUGH
+
#endif
-/* $PostgreSQL: pgsql/src/include/port/linux.h,v 1.41 2006/03/11 04:38:38 momjian Exp $ */
-
+/* $PostgreSQL: pgsql/src/include/port/linux.h,v 1.42 2006/10/04 00:30:09 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/netbsd.h,v 1.15 2006/03/11 04:38:38 momjian Exp $ */
-
+/* $PostgreSQL: pgsql/src/include/port/netbsd.h,v 1.16 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.61 2006/10/03 20:33:20 tgl Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.62 2006/10/04 00:30:10 momjian Exp $ */
#if defined(_MSC_VER) || defined(__BORLANDC__)
-#define WIN32_ONLY_COMPILER
+#define WIN32_ONLY_COMPILER
#endif
/* undefine and redefine after #include */
#include
#include
#include
-#include /* for non-unicode version */
+#include /* for non-unicode version */
#undef near
/* Must be here to avoid conflicting with prototype in windows.h */
#else /* not BUILDING_DLL */
#define DLLIMPORT __declspec (dllimport)
#endif
-
#else /* not CYGWIN, not MSVC, not MingW */
#define DLLIMPORT
struct timeval it_interval;
struct timeval it_value;
};
-int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue);
+int setitimer(int which, const struct itimerval * value, struct itimerval * ovalue);
/*
/* Pulled from Makefile.port in mingw */
#define DLSUFFIX ".dll"
+
#endif
-/* $PostgreSQL: pgsql/src/include/port/win32/dlfcn.h,v 1.3 2006/03/11 04:38:38 momjian Exp $ */
-
-
+/* $PostgreSQL: pgsql/src/include/port/win32/dlfcn.h,v 1.4 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32/grp.h,v 1.3 2006/03/11 04:38:38 momjian Exp $ */
-
-
+/* $PostgreSQL: pgsql/src/include/port/win32/grp.h,v 1.4 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32/netdb.h,v 1.3 2006/03/11 04:38:38 momjian Exp $ */
-
+/* $PostgreSQL: pgsql/src/include/port/win32/netdb.h,v 1.4 2006/10/04 00:30:10 momjian Exp $ */
/*
- * $PostgreSQL: pgsql/src/include/port/win32/sys/socket.h,v 1.6 2006/07/16 01:35:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/port/win32/sys/socket.h,v 1.7 2006/10/04 00:30:10 momjian Exp $
*/
#ifndef WIN32_SYS_SOCKET_H
#define WIN32_SYS_SOCKET_H
*/
#undef gai_strerror
-#endif /* WIN32_SYS_SOCKET_H */
+#endif /* WIN32_SYS_SOCKET_H */
/*
* Headers for port/dirent.c, win32 native implementation of dirent functions
*
- * $PostgreSQL: pgsql/src/include/port/win32_msvc/dirent.h,v 1.2 2006/06/26 12:59:44 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/port/win32_msvc/dirent.h,v 1.3 2006/10/04 00:30:10 momjian Exp $
*/
#ifndef _WIN32VC_DIRENT_H
#define _WIN32VC_DIRENT_H
-struct dirent {
- long d_ino;
+struct dirent
+{
+ long d_ino;
unsigned short d_reclen;
unsigned short d_namlen;
- char d_name[MAX_PATH];
+ char d_name[MAX_PATH];
};
typedef struct DIR DIR;
-DIR* opendir(const char *);
-struct dirent* readdir(DIR *);
-int closedir(DIR*);
+DIR *opendir(const char *);
+struct dirent *readdir(DIR *);
+int closedir(DIR *);
+
#endif
-/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/file.h,v 1.2 2006/06/26 12:52:12 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/file.h,v 1.3 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/param.h,v 1.2 2006/06/26 12:52:12 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/param.h,v 1.3 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/time.h,v 1.2 2006/06/26 12:52:12 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32_msvc/sys/time.h,v 1.3 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32_msvc/unistd.h,v 1.2 2006/06/26 12:52:12 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32_msvc/unistd.h,v 1.3 2006/10/04 00:30:10 momjian Exp $ */
-/* $PostgreSQL: pgsql/src/include/port/win32_msvc/utime.h,v 1.2 2006/06/26 12:52:12 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32_msvc/utime.h,v 1.3 2006/10/04 00:30:10 momjian Exp $ */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/freespace.h,v 1.22 2006/09/21 20:31:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/freespace.h,v 1.23 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
FSMRelation *priorPhysical; /* prior rel in arena-storage order */
bool isIndex; /* if true, we store only page numbers */
Size avgRequest; /* moving average of space requests */
- BlockNumber interestingPages; /* # of pages with useful free space */
+ BlockNumber interestingPages; /* # of pages with useful free space */
int firstChunk; /* chunk # of my first chunk in arena */
int storedPages; /* # of pages stored in arena */
int nextPage; /* index (from 0) to start next search at */
Size spaceNeeded);
extern Size GetAvgFSMRequestSize(RelFileNode *rel);
extern void RecordRelationFreeSpace(RelFileNode *rel,
- BlockNumber interestingPages,
- int nPages,
- PageFreeSpaceInfo *pageSpaces);
+ BlockNumber interestingPages,
+ int nPages,
+ PageFreeSpaceInfo *pageSpaces);
extern BlockNumber GetFreeIndexPage(RelFileNode *rel);
extern void RecordIndexFreeSpace(RelFileNode *rel,
- BlockNumber interestingPages,
- int nPages,
- BlockNumber *pages);
+ BlockNumber interestingPages,
+ int nPages,
+ BlockNumber *pages);
extern void FreeSpaceMapTruncateRel(RelFileNode *rel, BlockNumber nblocks);
extern void FreeSpaceMapForgetRel(RelFileNode *rel);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.100 2006/09/22 23:20:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.101 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int numLockModes;
bool transactional;
const LOCKMASK *conflictTab;
- const char * const *lockModeNames;
+ const char *const * lockModeNames;
const bool *trace_flag;
} LockMethodData;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/pg_sema.h,v 1.9 2006/04/29 16:34:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pg_sema.h,v 1.10 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef USE_WIN32_SEMAPHORES
-typedef HANDLE PGSemaphoreData;
+typedef HANDLE PGSemaphoreData;
#endif
typedef PGSemaphoreData *PGSemaphore;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/pg_shmem.h,v 1.19 2006/08/01 19:03:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/pg_shmem.h,v 1.20 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void RegisterAddinContext(const char *name, Size size);
extern Size AddinShmemSize(void);
-extern void InitAddinContexts(void * start);
+extern void InitAddinContexts(void *start);
extern void *ShmemAllocFromContext(Size size, const char *name);
extern void ShmemResetContext(const char *name);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.90 2006/07/30 02:07:18 alvherre Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.91 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid roleId; /* OID of role using this backend */
bool inVacuum; /* true if current xact is a LAZY VACUUM */
-
+
/* Info about LWLock the process is currently waiting for, if any. */
bool lwWaiting; /* true if waiting for an LW lock */
bool lwExclusive; /* true if waiting for exclusive access */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.83 2006/09/08 15:55:53 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.84 2006/10/04 00:30:10 momjian Exp $
*
* OLD COMMENTS
* This file was created so that other c files could get the two
extern void set_debug_options(int debug_flag,
GucContext context, GucSource source);
extern bool set_plan_disabling_options(const char *arg,
- GucContext context, GucSource source);
+ GucContext context, GucSource source);
extern const char *get_stats_option_name(const char *arg);
#endif /* TCOPPROT_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.97 2006/09/05 21:08:36 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.98 2006/10/04 00:30:10 momjian Exp $
*
* NOTES
* An ACL array is simply an array of AclItems, representing the union
*/
typedef struct
{
- bool is_grant;
+ bool is_grant;
GrantObjectType objtype;
- List *objects;
- bool all_privs;
- AclMode privileges;
- List *grantees;
- bool grant_option;
+ List *objects;
+ bool all_privs;
+ AclMode privileges;
+ List *grantees;
+ bool grant_option;
DropBehavior behavior;
} InternalGrant;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.61 2006/07/13 18:01:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/catcache.h,v 1.62 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Each tuple in a cache is a member of a Dllist that stores the elements
- * of its hash bucket. We keep each Dllist in LRU order to speed repeated
+ * of its hash bucket. We keep each Dllist in LRU order to speed repeated
* lookups.
*/
Dlelem cache_elem; /* list member of per-bucket list */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.44 2006/07/22 23:04:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.45 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Only those fields indicated by hash_flags need be set */
typedef struct HASHCTL
{
- long num_partitions; /* # partitions (must be power of 2) */
+ long num_partitions; /* # partitions (must be power of 2) */
long ssize; /* segment size */
long dsize; /* (initial) directory size */
long max_dsize; /* limit to dsize if dir size is limited */
bool *foundPtr);
extern uint32 get_hash_value(HTAB *hashp, const void *keyPtr);
extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
- uint32 hashvalue, HASHACTION action,
- bool *foundPtr);
+ uint32 hashvalue, HASHACTION action,
+ bool *foundPtr);
extern long hash_get_num_entries(HTAB *hashp);
extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
extern void *hash_seq_search(HASH_SEQ_STATUS *status);
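A sketch of typical dynahash usage built on the HASHCTL fields and hash_search() API above (MyEntry, the table name, and sizes are assumptions):

#include "postgres.h"
#include "utils/hsearch.h"

typedef struct MyEntry
{
	Oid			key;			/* hash key; must be first */
	int			count;
} MyEntry;

static HTAB *
make_oid_table(void)
{
	HASHCTL		ctl;

	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(Oid);
	ctl.entrysize = sizeof(MyEntry);
	ctl.hash = oid_hash;

	/* Only the fields named in the flags need to be set in ctl */
	return hash_create("my oid table", 128, &ctl, HASH_ELEM | HASH_FUNCTION);
}

static void
bump_count(HTAB *tab, Oid oid)
{
	bool		found;
	MyEntry    *entry = (MyEntry *) hash_search(tab, &oid, HASH_ENTER, &found);

	if (!found)
		entry->count = 0;
	entry->count++;
}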
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.106 2006/09/28 20:51:43 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.107 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Oid get_opclass_member(Oid opclass, Oid subtype, int16 strategy);
extern Oid get_op_hash_function(Oid opno);
extern void get_op_btree_interpretation(Oid opno,
- List **opclasses, List **opstrats);
+ List **opclasses, List **opstrats);
extern Oid get_opclass_proc(Oid opclass, Oid subtype, int16 procnum);
extern char *get_attname(Oid relid, AttrNumber attnum);
extern char *get_relid_attribute_name(Oid relid, AttrNumber attnum);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.70 2006/09/07 22:52:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.71 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
long portalPos;
/* Presentation data, primarily used by the pg_cursors system view */
- TimestampTz creation_time; /* time at which this portal was defined */
+ TimestampTz creation_time; /* time at which this portal was defined */
bool visible; /* include this portal in pg_cursors? */
} PortalData;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.91 2006/07/03 22:45:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.92 2006/10/04 00:30:10 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* rd_options is set whenever rd_rel is loaded into the relcache entry.
- * Note that you can NOT look into rd_rel for this data. NULL means
- * "use defaults".
+ * Note that you can NOT look into rd_rel for this data. NULL means "use
+ * defaults".
*/
bytea *rd_options; /* parsed pg_class.reloptions */
* Note: rd_amcache is available for index AMs to cache private data about
* an index. This must be just a cache since it may get reset at any time
* (in particular, it will get reset by a relcache inval message for the
- * index). If used, it must point to a single memory chunk palloc'd in
+ * index). If used, it must point to a single memory chunk palloc'd in
* rd_indexcxt. A relcache reset will include freeing that chunk and
* setting rd_amcache = NULL.
*/
*/
typedef struct StdRdOptions
{
- int32 vl_len; /* required to be a bytea */
- int fillfactor; /* page fill factor in percent (0..100) */
+ int32 vl_len; /* required to be a bytea */
+ int fillfactor; /* page fill factor in percent (0..100) */
} StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.8 2006/07/13 18:01:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.9 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* support for tupledesc refcount management */
extern void ResourceOwnerEnlargeTupleDescs(ResourceOwner owner);
extern void ResourceOwnerRememberTupleDesc(ResourceOwner owner,
- TupleDesc tupdesc);
+ TupleDesc tupdesc);
extern void ResourceOwnerForgetTupleDesc(ResourceOwner owner,
- TupleDesc tupdesc);
+ TupleDesc tupdesc);
#endif /* RESOWNER_H */
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.35 2006/09/20 19:50:21 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.36 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* selfuncs.c */
extern void examine_variable(PlannerInfo *root, Node *node, int varRelid,
- VariableStatData *vardata);
+ VariableStatData *vardata);
extern bool get_restriction_variable(PlannerInfo *root, List *args,
- int varRelid,
- VariableStatData *vardata, Node **other,
- bool *varonleft);
+ int varRelid,
+ VariableStatData *vardata, Node **other,
+ bool *varonleft);
extern void get_join_variables(PlannerInfo *root, List *args,
- VariableStatData *vardata1,
- VariableStatData *vardata2);
+ VariableStatData *vardata1,
+ VariableStatData *vardata2);
extern double get_variable_numdistinct(VariableStatData *vardata);
extern double mcv_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
- Datum constval, bool varonleft,
- double *sumcommonp);
+ Datum constval, bool varonleft,
+ double *sumcommonp);
extern double histogram_selectivity(VariableStatData *vardata, FmgrInfo *opproc,
- Datum constval, bool varonleft,
- int min_hist_size, int n_skip);
+ Datum constval, bool varonleft,
+ int min_hist_size, int n_skip);
extern Pattern_Prefix_Status pattern_fixed_prefix(Const *patt,
Pattern_Type ptype,
extern Selectivity nulltestsel(PlannerInfo *root, NullTestType nulltesttype,
Node *arg, int varRelid);
extern Selectivity scalararraysel(PlannerInfo *root,
- ScalarArrayOpExpr *clause,
- bool is_join_clause,
- int varRelid, JoinType jointype);
+ ScalarArrayOpExpr *clause,
+ bool is_join_clause,
+ int varRelid, JoinType jointype);
extern int estimate_array_length(Node *arrayexpr);
extern Selectivity rowcomparesel(PlannerInfo *root,
- RowCompareExpr *clause,
- int varRelid, JoinType jointype);
+ RowCompareExpr *clause,
+ int varRelid, JoinType jointype);
extern void mergejoinscansel(PlannerInfo *root, Node *clause,
Selectivity *leftscan,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.63 2006/09/05 01:13:40 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.64 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#else
typedef double fsec_t;
-
#endif
/*
extern TimestampTz GetCurrentTimestamp(void);
extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs);
+ long *secs, int *microsecs);
extern TimestampTz time_t_to_timestamptz(time_t tm);
extern time_t timestamptz_to_time_t(TimestampTz t);
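
A small illustrative sketch of the GetCurrentTimestamp()/TimestampDifference() calls declared above; report_elapsed() and the DEBUG1 message are invented for the example.

#include "postgres.h"
#include "utils/timestamp.h"

static void
report_elapsed(TimestampTz start_time)
{
	long		secs;
	int			usecs;

	/* split the elapsed interval into seconds and microseconds */
	TimestampDifference(start_time, GetCurrentTimestamp(),
						&secs, &usecs);
	elog(DEBUG1, "elapsed: %ld.%06d s", secs, usecs);
}
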
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.63 2006/09/03 15:59:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tqual.h,v 1.64 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* note: all ids in xip[] satisfy xmin <= xip[i] < xmax */
int32 subxcnt; /* # of xact ids in subxip[], -1 if overflow */
TransactionId *subxip; /* array of subxact IDs in progress */
+
/*
* note: all ids in subxip[] are >= xmin, but we don't bother filtering
* out any that are >= xmax
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.22 2006/07/13 16:49:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tuplesort.h,v 1.23 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int workMem, bool randomAccess);
extern void tuplesort_puttupleslot(Tuplesortstate *state,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
extern void tuplesort_putindextuple(Tuplesortstate *state, IndexTuple tuple);
extern void tuplesort_putdatum(Tuplesortstate *state, Datum val,
bool isNull);
extern void tuplesort_performsort(Tuplesortstate *state);
extern bool tuplesort_gettupleslot(Tuplesortstate *state, bool forward,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
extern IndexTuple tuplesort_getindextuple(Tuplesortstate *state, bool forward,
- bool *should_free);
+ bool *should_free);
extern bool tuplesort_getdatum(Tuplesortstate *state, bool forward,
Datum *val, bool *isNull);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tuplestore.h,v 1.18 2006/06/27 02:51:40 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tuplestore.h,v 1.19 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int maxKBytes);
extern void tuplestore_puttupleslot(Tuplestorestate *state,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
extern void tuplestore_puttuple(Tuplestorestate *state, HeapTuple tuple);
/* tuplestore_donestoring() used to be required, but is no longer used */
/* backwards scan is only allowed if randomAccess was specified 'true' */
extern bool tuplestore_gettupleslot(Tuplestorestate *state, bool forward,
- TupleTableSlot *slot);
+ TupleTableSlot *slot);
extern bool tuplestore_advance(Tuplestorestate *state, bool forward);
extern void tuplestore_end(Tuplestorestate *state);
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.11 2006/06/16 18:42:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.12 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Tuple descriptor if it's a composite type (row type). NULL if not
- * composite or information hasn't yet been requested. (NOTE: this is
- * a reference-counted tupledesc.)
+ * composite or information hasn't yet been requested. (NOTE: this is a
+ * reference-counted tupledesc.)
*/
TupleDesc tupDesc;
} TypeCacheEntry;
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/tzparser.h,v 1.1 2006/07/25 03:51:22 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/tzparser.h,v 1.2 2006/10/04 00:30:11 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct tzEntry
{
/* the actual data: TZ abbrev (downcased), offset, DST flag */
- char *abbrev;
- int offset; /* in seconds from UTC */
- bool is_dst;
+ char *abbrev;
+ int offset; /* in seconds from UTC */
+ bool is_dst;
/* source information (for error messages) */
- int lineno;
+ int lineno;
const char *filename;
} tzEntry;
extern bool load_tzoffsets(const char *filename, bool doit, int elevel);
-#endif /* TZPARSER_H */
+#endif /* TZPARSER_H */
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.47 2006/08/15 06:40:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/compatlib/informix.c,v 1.48 2006/10/04 00:30:11 momjian Exp $ */
#include
#include
int
deccvasc(char *cp, int len, decimal *np)
{
- char *str;
- int ret = 0;
- numeric *result;
+ char *str;
+ int ret = 0;
+ numeric *result;
rsetnull(CDECIMALTYPE, (char *) np);
if (risnull(CSTRINGTYPE, cp))
return 0;
- str = ecpg_strndup(cp, len); /* decimal_in always converts the complete string */
+ str = ecpg_strndup(cp, len);/* decimal_in always converts the complete
+ * string */
if (!str)
ret = ECPG_INFORMIX_NUM_UNDERFLOW;
else
return -1;
/*
- * TODO: have to take care of len here and create exponential notation
- * if necessary
+ * TODO: have to take care of len here and create exponential notation if
+ * necessary
*/
if ((int) (strlen(str) + 1) > len)
{
}
/*
- * rfmt.c - description
+ * rfmt.c - description
* by Carsten Wolff , Wed Apr 2 2003
*/
void
ldchar(char *src, int len, char *dest)
{
- int dlen = byleng(src, len);
+ int dlen = byleng(src, len);
+
memmove(dest, src, dlen);
dest[dlen] = '\0';
}
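
An illustrative sketch of the Informix-compatibility helpers touched above, deccvasc() and ldchar(); the input strings, buffer sizes, and the assumption that the declarations come from ecpg_informix.h are made up for the example.

#include <stdlib.h>
#include "ecpg_informix.h"		/* assumed to declare deccvasc(), ldchar(), decimal */

static void
compat_demo(void)
{
	decimal		num;
	char		padded[11] = "abc       ";	/* blank-padded source field */
	char		trimmed[11];

	/* parse an ASCII number into an Informix-style decimal */
	if (deccvasc("123.45", 6, &num) != 0)
		return;					/* conversion error */

	/* copy a fixed-length, blank-padded field into a C string */
	ldchar(padded, 10, trimmed);	/* trimmed now holds "abc" */
}
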
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.35 2006/08/29 12:24:51 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.36 2006/10/04 00:30:11 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
pthread_key_create(&actual_connection_key, NULL);
}
-void ecpg_pthreads_init(void)
+void
+ecpg_pthreads_init(void)
{
pthread_once(&actual_connection_key_once, ecpg_actual_connection_init);
}
if (sqlstate == NULL)
sqlstate = ECPG_SQLSTATE_ECPG_INTERNAL_ERROR;
- if (message == NULL) /* Shouldn't happen, but need to be sure */
+ if (message == NULL) /* Shouldn't happen, but need to be sure */
message = "No message received";
/* these are not warnings */
ECPGraise(lineno, ECPG_CONNECT, ECPG_SQLSTATE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION, realname ? realname : "");
if (host)
ECPGfree(host);
- /* port not set yet
- * if (port)
- * ECPGfree(port); */
+
+ /*
+ * port not set yet if (port) ECPGfree(port);
+ */
if (options)
ECPGfree(options);
if (realname)
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.34 2006/08/24 10:35:58 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.35 2006/10/04 00:30:11 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#include "pgtypes_timestamp.h"
#include "pgtypes_interval.h"
-static enum { NOT_CHECKED, REGRESS, NORMAL } ECPG_regression_mode = NOT_CHECKED;
+static enum
+{
+ NOT_CHECKED, REGRESS, NORMAL
+} ECPG_regression_mode = NOT_CHECKED;
static bool
garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat)
long ind_offset, enum ARRAY_TYPE isarray, enum COMPAT_MODE compat, bool force_indicator)
{
struct sqlca_t *sqlca = ECPGget_sqlca();
- char *pval = (char *) PQgetvalue(results, act_tuple, act_field);
- int binary = PQfformat(results, act_field);
- int size = PQgetlength(results, act_tuple, act_field);
- int value_for_indicator = 0;
- long log_offset;
+ char *pval = (char *) PQgetvalue(results, act_tuple, act_field);
+ int binary = PQfformat(results, act_field);
+ int size = PQgetlength(results, act_tuple, act_field);
+ int value_for_indicator = 0;
+ long log_offset;
/*
* use a global variable to see if the environment variable
{
if (getenv("ECPG_REGRESSION"))
ECPG_regression_mode = REGRESS;
- else
+ else
ECPG_regression_mode = NORMAL;
}
/*
- * If we are running in a regression test, do not log the offset
- * variable, it depends on the machine's alignment.
+ * If we are running in a regression test, do not log the offset variable,
+ * it depends on the machine's alignment.
*/
if (ECPG_regression_mode == REGRESS)
log_offset = -1;
{
if (pval)
{
- if (varcharsize == 0 || varcharsize*offset >= size)
+ if (varcharsize == 0 || varcharsize * offset >= size)
memcpy((char *) ((long) var + offset * act_tuple),
pval, size);
else
{
memcpy((char *) ((long) var + offset * act_tuple),
- pval, varcharsize*offset);
+ pval, varcharsize * offset);
- if (varcharsize*offset < size)
+ if (varcharsize * offset < size)
{
/* truncation */
switch (ind_type)
}
pval += size;
}
- }
+ }
else
{
- switch (type)
- {
- long res;
- unsigned long ures;
- double dres;
- char *scan_length;
- numeric *nres;
- date ddres;
- timestamp tres;
- interval *ires;
-
- case ECPGt_short:
- case ECPGt_int:
- case ECPGt_long:
- if (pval)
- {
- res = strtol(pval, &scan_length, 10);
- if (garbage_left(isarray, scan_length, compat))
+ switch (type)
+ {
+ long res;
+ unsigned long ures;
+ double dres;
+ char *scan_length;
+ numeric *nres;
+ date ddres;
+ timestamp tres;
+ interval *ires;
+
+ case ECPGt_short:
+ case ECPGt_int:
+ case ECPGt_long:
+ if (pval)
{
- ECPGraise(lineno, ECPG_INT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ res = strtol(pval, &scan_length, 10);
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_INT_FORMAT,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ }
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- res = 0L;
+ else
+ res = 0L;
- switch (type)
- {
- case ECPGt_short:
- *((short *) (var + offset * act_tuple)) = (short) res;
- break;
- case ECPGt_int:
- *((int *) (var + offset * act_tuple)) = (int) res;
- break;
- case ECPGt_long:
- *((long *) (var + offset * act_tuple)) = (long) res;
- break;
- default:
- /* Cannot happen */
- break;
- }
- break;
+ switch (type)
+ {
+ case ECPGt_short:
+ *((short *) (var + offset * act_tuple)) = (short) res;
+ break;
+ case ECPGt_int:
+ *((int *) (var + offset * act_tuple)) = (int) res;
+ break;
+ case ECPGt_long:
+ *((long *) (var + offset * act_tuple)) = (long) res;
+ break;
+ default:
+ /* Cannot happen */
+ break;
+ }
+ break;
- case ECPGt_unsigned_short:
- case ECPGt_unsigned_int:
- case ECPGt_unsigned_long:
- if (pval)
- {
- ures = strtoul(pval, &scan_length, 10);
- if (garbage_left(isarray, scan_length, compat))
+ case ECPGt_unsigned_short:
+ case ECPGt_unsigned_int:
+ case ECPGt_unsigned_long:
+ if (pval)
{
- ECPGraise(lineno, ECPG_UINT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ ures = strtoul(pval, &scan_length, 10);
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_UINT_FORMAT,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ }
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- ures = 0L;
+ else
+ ures = 0L;
- switch (type)
- {
- case ECPGt_unsigned_short:
- *((unsigned short *) (var + offset * act_tuple)) = (unsigned short) ures;
- break;
- case ECPGt_unsigned_int:
- *((unsigned int *) (var + offset * act_tuple)) = (unsigned int) ures;
- break;
- case ECPGt_unsigned_long:
- *((unsigned long *) (var + offset * act_tuple)) = (unsigned long) ures;
- break;
- default:
- /* Cannot happen */
- break;
- }
- break;
+ switch (type)
+ {
+ case ECPGt_unsigned_short:
+ *((unsigned short *) (var + offset * act_tuple)) = (unsigned short) ures;
+ break;
+ case ECPGt_unsigned_int:
+ *((unsigned int *) (var + offset * act_tuple)) = (unsigned int) ures;
+ break;
+ case ECPGt_unsigned_long:
+ *((unsigned long *) (var + offset * act_tuple)) = (unsigned long) ures;
+ break;
+ default:
+ /* Cannot happen */
+ break;
+ }
+ break;
#ifdef HAVE_LONG_LONG_INT_64
#ifdef HAVE_STRTOLL
- case ECPGt_long_long:
- if (pval)
- {
- *((long long int *) (var + offset * act_tuple)) = strtoll(pval, &scan_length, 10);
- if (garbage_left(isarray, scan_length, compat))
+ case ECPGt_long_long:
+ if (pval)
{
- ECPGraise(lineno, ECPG_INT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ *((long long int *) (var + offset * act_tuple)) = strtoll(pval, &scan_length, 10);
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_INT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ }
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- *((long long int *) (var + offset * act_tuple)) = (long long) 0;
+ else
+ *((long long int *) (var + offset * act_tuple)) = (long long) 0;
- break;
+ break;
#endif /* HAVE_STRTOLL */
#ifdef HAVE_STRTOULL
- case ECPGt_unsigned_long_long:
- if (pval)
- {
- *((unsigned long long int *) (var + offset * act_tuple)) = strtoull(pval, &scan_length, 10);
- if ((isarray && *scan_length != ',' && *scan_length != '}')
- || (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' ')) /* Garbage left */
+ case ECPGt_unsigned_long_long:
+ if (pval)
{
- ECPGraise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ *((unsigned long long int *) (var + offset * act_tuple)) = strtoull(pval, &scan_length, 10);
+ if ((isarray && *scan_length != ',' && *scan_length != '}')
+ || (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' ')) /* Garbage left */
+ {
+ ECPGraise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ }
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- *((unsigned long long int *) (var + offset * act_tuple)) = (long long) 0;
+ else
+ *((unsigned long long int *) (var + offset * act_tuple)) = (long long) 0;
- break;
+ break;
#endif /* HAVE_STRTOULL */
#endif /* HAVE_LONG_LONG_INT_64 */
- case ECPGt_float:
- case ECPGt_double:
- if (pval)
- {
- if (isarray && *pval == '"')
- dres = strtod(pval + 1, &scan_length);
- else
- dres = strtod(pval, &scan_length);
-
- if (isarray && *scan_length == '"')
- scan_length++;
-
- if (garbage_left(isarray, scan_length, compat))
+ case ECPGt_float:
+ case ECPGt_double:
+ if (pval)
{
- ECPGraise(lineno, ECPG_FLOAT_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
- }
- pval = scan_length;
- }
- else
- dres = 0.0;
+ if (isarray && *pval == '"')
+ dres = strtod(pval + 1, &scan_length);
+ else
+ dres = strtod(pval, &scan_length);
- switch (type)
- {
- case ECPGt_float:
- *((float *) (var + offset * act_tuple)) = dres;
- break;
- case ECPGt_double:
- *((double *) (var + offset * act_tuple)) = dres;
- break;
- default:
- /* Cannot happen */
- break;
- }
- break;
+ if (isarray && *scan_length == '"')
+ scan_length++;
- case ECPGt_bool:
- if (pval)
- {
- if (pval[0] == 'f' && pval[1] == '\0')
- {
- if (offset == sizeof(char))
- *((char *) (var + offset * act_tuple)) = false;
- else if (offset == sizeof(int))
- *((int *) (var + offset * act_tuple)) = false;
- else
- ECPGraise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH,
- "different size");
- break;
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_FLOAT_FORMAT,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ }
+ pval = scan_length;
}
- else if (pval[0] == 't' && pval[1] == '\0')
+ else
+ dres = 0.0;
+
+ switch (type)
{
- if (offset == sizeof(char))
- *((char *) (var + offset * act_tuple)) = true;
- else if (offset == sizeof(int))
- *((int *) (var + offset * act_tuple)) = true;
- else
- ECPGraise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH,
- "different size");
- break;
+ case ECPGt_float:
+ *((float *) (var + offset * act_tuple)) = dres;
+ break;
+ case ECPGt_double:
+ *((double *) (var + offset * act_tuple)) = dres;
+ break;
+ default:
+ /* Cannot happen */
+ break;
}
- else if (pval[0] == '\0' && PQgetisnull(results, act_tuple, act_field))
+ break;
+
+ case ECPGt_bool:
+ if (pval)
{
- /* NULL is valid */
- break;
+ if (pval[0] == 'f' && pval[1] == '\0')
+ {
+ if (offset == sizeof(char))
+ *((char *) (var + offset * act_tuple)) = false;
+ else if (offset == sizeof(int))
+ *((int *) (var + offset * act_tuple)) = false;
+ else
+ ECPGraise(lineno, ECPG_CONVERT_BOOL,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH,
+ "different size");
+ break;
+ }
+ else if (pval[0] == 't' && pval[1] == '\0')
+ {
+ if (offset == sizeof(char))
+ *((char *) (var + offset * act_tuple)) = true;
+ else if (offset == sizeof(int))
+ *((int *) (var + offset * act_tuple)) = true;
+ else
+ ECPGraise(lineno, ECPG_CONVERT_BOOL,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH,
+ "different size");
+ break;
+ }
+ else if (pval[0] == '\0' && PQgetisnull(results, act_tuple, act_field))
+ {
+ /* NULL is valid */
+ break;
+ }
}
- }
- ECPGraise(lineno, ECPG_CONVERT_BOOL,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
- break;
+ ECPGraise(lineno, ECPG_CONVERT_BOOL,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
+ return (false);
+ break;
- case ECPGt_char:
- case ECPGt_unsigned_char:
- if (pval)
- {
- if (varcharsize == 0 || varcharsize > size)
- strncpy((char *) ((long) var + offset * act_tuple), pval, size + 1);
- else
+ case ECPGt_char:
+ case ECPGt_unsigned_char:
+ if (pval)
{
- strncpy((char *) ((long) var + offset * act_tuple), pval, varcharsize);
-
- if (varcharsize < size)
+ if (varcharsize == 0 || varcharsize > size)
+ strncpy((char *) ((long) var + offset * act_tuple), pval, size + 1);
+ else
{
- /* truncation */
- switch (ind_type)
+ strncpy((char *) ((long) var + offset * act_tuple), pval, varcharsize);
+
+ if (varcharsize < size)
{
- case ECPGt_short:
- case ECPGt_unsigned_short:
- *((short *) (ind + ind_offset * act_tuple)) = size;
- break;
- case ECPGt_int:
- case ECPGt_unsigned_int:
- *((int *) (ind + ind_offset * act_tuple)) = size;
- break;
- case ECPGt_long:
- case ECPGt_unsigned_long:
- *((long *) (ind + ind_offset * act_tuple)) = size;
- break;
+ /* truncation */
+ switch (ind_type)
+ {
+ case ECPGt_short:
+ case ECPGt_unsigned_short:
+ *((short *) (ind + ind_offset * act_tuple)) = size;
+ break;
+ case ECPGt_int:
+ case ECPGt_unsigned_int:
+ *((int *) (ind + ind_offset * act_tuple)) = size;
+ break;
+ case ECPGt_long:
+ case ECPGt_unsigned_long:
+ *((long *) (ind + ind_offset * act_tuple)) = size;
+ break;
#ifdef HAVE_LONG_LONG_INT_64
- case ECPGt_long_long:
- case ECPGt_unsigned_long_long:
- *((long long int *) (ind + ind_offset * act_tuple)) = size;
- break;
+ case ECPGt_long_long:
+ case ECPGt_unsigned_long_long:
+ *((long long int *) (ind + ind_offset * act_tuple)) = size;
+ break;
#endif /* HAVE_LONG_LONG_INT_64 */
- default:
- break;
+ default:
+ break;
+ }
+ sqlca->sqlwarn[0] = sqlca->sqlwarn[1] = 'W';
}
- sqlca->sqlwarn[0] = sqlca->sqlwarn[1] = 'W';
}
+ pval += size;
}
- pval += size;
- }
- break;
+ break;
- case ECPGt_varchar:
- if (pval)
- {
- struct ECPGgeneric_varchar *variable =
- (struct ECPGgeneric_varchar *) ((long) var + offset * act_tuple);
-
- variable->len = size;
- if (varcharsize == 0)
- strncpy(variable->arr, pval, variable->len);
- else
+ case ECPGt_varchar:
+ if (pval)
{
- strncpy(variable->arr, pval, varcharsize);
+ struct ECPGgeneric_varchar *variable =
+ (struct ECPGgeneric_varchar *) ((long) var + offset * act_tuple);
- if (variable->len > varcharsize)
+ variable->len = size;
+ if (varcharsize == 0)
+ strncpy(variable->arr, pval, variable->len);
+ else
{
- /* truncation */
- switch (ind_type)
+ strncpy(variable->arr, pval, varcharsize);
+
+ if (variable->len > varcharsize)
{
- case ECPGt_short:
- case ECPGt_unsigned_short:
- *((short *) (ind + offset * act_tuple)) = variable->len;
- break;
- case ECPGt_int:
- case ECPGt_unsigned_int:
- *((int *) (ind + offset * act_tuple)) = variable->len;
- break;
- case ECPGt_long:
- case ECPGt_unsigned_long:
- *((long *) (ind + offset * act_tuple)) = variable->len;
- break;
+ /* truncation */
+ switch (ind_type)
+ {
+ case ECPGt_short:
+ case ECPGt_unsigned_short:
+ *((short *) (ind + offset * act_tuple)) = variable->len;
+ break;
+ case ECPGt_int:
+ case ECPGt_unsigned_int:
+ *((int *) (ind + offset * act_tuple)) = variable->len;
+ break;
+ case ECPGt_long:
+ case ECPGt_unsigned_long:
+ *((long *) (ind + offset * act_tuple)) = variable->len;
+ break;
#ifdef HAVE_LONG_LONG_INT_64
- case ECPGt_long_long:
- case ECPGt_unsigned_long_long:
- *((long long int *) (ind + ind_offset * act_tuple)) = variable->len;
- break;
+ case ECPGt_long_long:
+ case ECPGt_unsigned_long_long:
+ *((long long int *) (ind + ind_offset * act_tuple)) = variable->len;
+ break;
#endif /* HAVE_LONG_LONG_INT_64 */
- default:
- break;
- }
- sqlca->sqlwarn[0] = sqlca->sqlwarn[1] = 'W';
+ default:
+ break;
+ }
+ sqlca->sqlwarn[0] = sqlca->sqlwarn[1] = 'W';
- variable->len = varcharsize;
+ variable->len = varcharsize;
+ }
}
+ pval += size;
}
- pval += size;
- }
- break;
+ break;
- case ECPGt_decimal:
- case ECPGt_numeric:
- if (pval)
- {
- if (isarray && *pval == '"')
- nres = PGTYPESnumeric_from_asc(pval + 1, &scan_length);
- else
- nres = PGTYPESnumeric_from_asc(pval, &scan_length);
-
- /* did we get an error? */
- if (nres == NULL)
+ case ECPGt_decimal:
+ case ECPGt_numeric:
+ if (pval)
{
- ECPGlog("ECPGget_data line %d: RESULT: %s errno %d\n",
- lineno, pval ? pval : "", errno);
+ if (isarray && *pval == '"')
+ nres = PGTYPESnumeric_from_asc(pval + 1, &scan_length);
+ else
+ nres = PGTYPESnumeric_from_asc(pval, &scan_length);
- if (INFORMIX_MODE(compat))
+ /* did we get an error? */
+ if (nres == NULL)
{
- /*
- * Informix wants its own NULL value here instead
- * of an error
- */
- nres = PGTYPESnumeric_new();
- if (nres)
- ECPGset_noind_null(ECPGt_numeric, nres);
+ ECPGlog("ECPGget_data line %d: RESULT: %s errno %d\n",
+ lineno, pval ? pval : "", errno);
+
+ if (INFORMIX_MODE(compat))
+ {
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
+ nres = PGTYPESnumeric_new();
+ if (nres)
+ ECPGset_noind_null(ECPGt_numeric, nres);
+ else
+ {
+ ECPGraise(lineno, ECPG_OUT_OF_MEMORY,
+ ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL);
+ return (false);
+ }
+ }
else
{
- ECPGraise(lineno, ECPG_OUT_OF_MEMORY,
- ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY, NULL);
+ ECPGraise(lineno, ECPG_NUMERIC_FORMAT,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
}
}
else
{
- ECPGraise(lineno, ECPG_NUMERIC_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
- }
- }
- else
- {
- if (isarray && *scan_length == '"')
- scan_length++;
+ if (isarray && *scan_length == '"')
+ scan_length++;
- if (garbage_left(isarray, scan_length, compat))
- {
- free(nres);
- ECPGraise(lineno, ECPG_NUMERIC_FORMAT,
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ free(nres);
+ ECPGraise(lineno, ECPG_NUMERIC_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- nres = PGTYPESnumeric_from_asc("0.0", &scan_length);
-
- if (type == ECPGt_numeric)
- PGTYPESnumeric_copy(nres, (numeric *) (var + offset * act_tuple));
- else
- PGTYPESnumeric_to_decimal(nres, (decimal *) (var + offset * act_tuple));
-
- free(nres);
- break;
+ else
+ nres = PGTYPESnumeric_from_asc("0.0", &scan_length);
- case ECPGt_interval:
- if (pval)
- {
- if (isarray && *pval == '"')
- ires = PGTYPESinterval_from_asc(pval + 1, &scan_length);
+ if (type == ECPGt_numeric)
+ PGTYPESnumeric_copy(nres, (numeric *) (var + offset * act_tuple));
else
- ires = PGTYPESinterval_from_asc(pval, &scan_length);
+ PGTYPESnumeric_to_decimal(nres, (decimal *) (var + offset * act_tuple));
+
+ free(nres);
+ break;
- /* did we get an error? */
- if (ires == NULL)
+ case ECPGt_interval:
+ if (pval)
{
- if (INFORMIX_MODE(compat))
+ if (isarray && *pval == '"')
+ ires = PGTYPESinterval_from_asc(pval + 1, &scan_length);
+ else
+ ires = PGTYPESinterval_from_asc(pval, &scan_length);
+
+ /* did we get an error? */
+ if (ires == NULL)
{
- /*
- * Informix wants its own NULL value here instead
- * of an error
- */
- ires = (interval *) ECPGalloc(sizeof(interval), lineno);
- if (!ires)
+ if (INFORMIX_MODE(compat))
+ {
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
+ ires = (interval *) ECPGalloc(sizeof(interval), lineno);
+ if (!ires)
+ return (false);
+
+ ECPGset_noind_null(ECPGt_interval, ires);
+ }
+ else
+ {
+ ECPGraise(lineno, ECPG_INTERVAL_FORMAT,
+ ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
-
- ECPGset_noind_null(ECPGt_interval, ires);
+ }
}
else
{
- ECPGraise(lineno, ECPG_INTERVAL_FORMAT,
- ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
- }
- }
- else
- {
- if (isarray && *scan_length == '"')
- scan_length++;
+ if (isarray && *scan_length == '"')
+ scan_length++;
- if (garbage_left(isarray, scan_length, compat))
- {
- free(ires);
- ECPGraise(lineno, ECPG_INTERVAL_FORMAT,
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ free(ires);
+ ECPGraise(lineno, ECPG_INTERVAL_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
+ pval = scan_length;
}
- pval = scan_length;
- }
- else
- ires = PGTYPESinterval_from_asc("0 seconds", NULL);
-
- PGTYPESinterval_copy(ires, (interval *) (var + offset * act_tuple));
- free(ires);
- break;
- case ECPGt_date:
- if (pval)
- {
- if (isarray && *pval == '"')
- ddres = PGTYPESdate_from_asc(pval + 1, &scan_length);
else
- ddres = PGTYPESdate_from_asc(pval, &scan_length);
+ ires = PGTYPESinterval_from_asc("0 seconds", NULL);
- /* did we get an error? */
- if (errno != 0)
+ PGTYPESinterval_copy(ires, (interval *) (var + offset * act_tuple));
+ free(ires);
+ break;
+ case ECPGt_date:
+ if (pval)
{
- if (INFORMIX_MODE(compat))
- {
- /*
- * Informix wants its own NULL value here instead
- * of an error
- */
- ECPGset_noind_null(ECPGt_date, &ddres);
- }
+ if (isarray && *pval == '"')
+ ddres = PGTYPESdate_from_asc(pval + 1, &scan_length);
else
+ ddres = PGTYPESdate_from_asc(pval, &scan_length);
+
+ /* did we get an error? */
+ if (errno != 0)
{
- ECPGraise(lineno, ECPG_DATE_FORMAT,
+ if (INFORMIX_MODE(compat))
+ {
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
+ ECPGset_noind_null(ECPGt_date, &ddres);
+ }
+ else
+ {
+ ECPGraise(lineno, ECPG_DATE_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
- }
- else
- {
- if (isarray && *scan_length == '"')
- scan_length++;
-
- if (garbage_left(isarray, scan_length, compat))
+ else
{
- ECPGraise(lineno, ECPG_DATE_FORMAT,
+ if (isarray && *scan_length == '"')
+ scan_length++;
+
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_DATE_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
- }
-
- *((date *) (var + offset * act_tuple)) = ddres;
- pval = scan_length;
- }
- break;
- case ECPGt_timestamp:
- if (pval)
- {
- if (isarray && *pval == '"')
- tres = PGTYPEStimestamp_from_asc(pval + 1, &scan_length);
- else
- tres = PGTYPEStimestamp_from_asc(pval, &scan_length);
+ *((date *) (var + offset * act_tuple)) = ddres;
+ pval = scan_length;
+ }
+ break;
- /* did we get an error? */
- if (errno != 0)
+ case ECPGt_timestamp:
+ if (pval)
{
- if (INFORMIX_MODE(compat))
- {
- /*
- * Informix wants its own NULL value here instead
- * of an error
- */
- ECPGset_noind_null(ECPGt_timestamp, &tres);
- }
+ if (isarray && *pval == '"')
+ tres = PGTYPEStimestamp_from_asc(pval + 1, &scan_length);
else
+ tres = PGTYPEStimestamp_from_asc(pval, &scan_length);
+
+ /* did we get an error? */
+ if (errno != 0)
{
- ECPGraise(lineno, ECPG_TIMESTAMP_FORMAT,
+ if (INFORMIX_MODE(compat))
+ {
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
+ ECPGset_noind_null(ECPGt_timestamp, &tres);
+ }
+ else
+ {
+ ECPGraise(lineno, ECPG_TIMESTAMP_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
- }
- else
- {
- if (isarray && *scan_length == '"')
- scan_length++;
-
- if (garbage_left(isarray, scan_length, compat))
+ else
{
- ECPGraise(lineno, ECPG_TIMESTAMP_FORMAT,
+ if (isarray && *scan_length == '"')
+ scan_length++;
+
+ if (garbage_left(isarray, scan_length, compat))
+ {
+ ECPGraise(lineno, ECPG_TIMESTAMP_FORMAT,
ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
- return (false);
+ return (false);
+ }
}
- }
- *((timestamp *) (var + offset * act_tuple)) = tres;
- pval = scan_length;
- }
- break;
+ *((timestamp *) (var + offset * act_tuple)) = tres;
+ pval = scan_length;
+ }
+ break;
- default:
- ECPGraise(lineno, ECPG_UNSUPPORTED,
- ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
- ECPGtype_name(type));
- return (false);
- break;
- }
- if (isarray == ECPG_ARRAY_ARRAY)
- {
- bool string = false;
+ default:
+ ECPGraise(lineno, ECPG_UNSUPPORTED,
+ ECPG_SQLSTATE_ECPG_INTERNAL_ERROR,
+ ECPGtype_name(type));
+ return (false);
+ break;
+ }
+ if (isarray == ECPG_ARRAY_ARRAY)
+ {
+ bool string = false;
- /* set array to next entry */
- ++act_tuple;
+ /* set array to next entry */
+ ++act_tuple;
- /* set pval to the next entry */
- for (; string || (*pval != ',' && *pval != '}' && *pval != '\0'); ++pval)
- if (*pval == '"')
- string = string ? false : true;
+ /* set pval to the next entry */
+ for (; string || (*pval != ',' && *pval != '}' && *pval != '\0'); ++pval)
+ if (*pval == '"')
+ string = string ? false : true;
- if (*pval == ',')
- ++pval;
- }
- else if (isarray == ECPG_ARRAY_VECTOR)
- {
- bool string = false;
+ if (*pval == ',')
+ ++pval;
+ }
+ else if (isarray == ECPG_ARRAY_VECTOR)
+ {
+ bool string = false;
- /* set array to next entry */
- ++act_tuple;
+ /* set array to next entry */
+ ++act_tuple;
- /* set pval to the next entry */
- for (; string || (*pval != ' ' && *pval != '\0'); ++pval)
- if (*pval == '"')
- string = string ? false : true;
+ /* set pval to the next entry */
+ for (; string || (*pval != ' ' && *pval != '\0'); ++pval)
+ if (*pval == '"')
+ string = string ? false : true;
- if (*pval == ' ')
- ++pval;
- }
+ if (*pval == ' ')
+ ++pval;
+ }
}
} while (*pval != '\0' && ((isarray == ECPG_ARRAY_ARRAY && *pval != '}') || isarray == ECPG_ARRAY_VECTOR));
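
For context, an illustrative embedded-SQL fragment showing the host-variable fetch that the conversions above serve; the database testdb, the table items, and its columns are invented for the example.

int
main(void)
{
	EXEC SQL BEGIN DECLARE SECTION;
	int			id = 1;
	double		price;
	VARCHAR		name[20];
	EXEC SQL END DECLARE SECTION;

	EXEC SQL CONNECT TO testdb;
	/* ECPGget_data() converts each result column into its host variable */
	EXEC SQL SELECT price, name INTO :price, :name
		FROM items WHERE id = :id;
	EXEC SQL DISCONNECT;
	return 0;
}
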
/* dynamic SQL support routines
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.19 2006/08/23 13:57:27 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.20 2006/10/04 00:30:11 momjian Exp $
*/
#define POSTGRES_ECPG_INTERNAL
/* allocate storage if needed */
if (arrsize == 0 && *(void **) var == NULL)
{
- void *mem = (void *) ECPGalloc(offset * ntuples, lineno);
+ void *mem = (void *) ECPGalloc(offset * ntuples, lineno);
+
if (!mem)
return false;
*(void **) var = mem;
ECPGfree(oldlocale);
}
else if (data_var.ind_type != ECPGt_NO_INDICATOR && data_var.ind_pointer != NULL)
- /* ind_type != NO_INDICATOR should always have ind_pointer != NULL but since this might be changed manually in the .c file let's play it safe */
+
+ /*
+ * ind_type != NO_INDICATOR should always have ind_pointer != NULL but
+ * since this might be changed manually in the .c file let's play it
+ * safe
+ */
{
/*
* this is like ECPGstore_result but since we don't have a data
/* allocate storage if needed */
if (data_var.ind_arrsize == 0 && data_var.ind_value == NULL)
{
- void *mem = (void *) ECPGalloc(data_var.ind_offset * ntuples, lineno);
+ void *mem = (void *) ECPGalloc(data_var.ind_offset * ntuples, lineno);
+
if (!mem)
return false;
*(void **) data_var.ind_pointer = mem;
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.14 2006/08/02 13:43:23 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/error.c,v 1.15 2006/10/04 00:30:11 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
sqlca->sqlcode = ECPG_PGSQL;
ECPGlog("raising sqlstate %.*s (sqlcode: %d) in line %d, '%s'.\n",
- sizeof(sqlca->sqlstate), sqlca->sqlstate, sqlca->sqlcode, line, sqlca->sqlerrm.sqlerrmc);
+ sizeof(sqlca->sqlstate), sqlca->sqlstate, sqlca->sqlcode, line, sqlca->sqlerrm.sqlerrmc);
/* free all memory we have allocated for the user */
ECPGfree_auto_mem();
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.61 2006/08/23 12:01:52 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.62 2006/10/04 00:30:11 momjian Exp $ */
/*
 * The aim is to get a simpler interface to the database routines.
quote_postgres(char *arg, bool quote, int lineno)
{
char *res;
- int i, ri = 0;
+ int i,
+ ri = 0;
- /* if quote is false we just need to store things in a descriptor
- * they will be quoted once they are inserted in a statement */
+ /*
+ * if quote is false we just need to store things in a descriptor they
+ * will be quoted once they are inserted in a statement
+ */
if (!quote)
return res = ECPGstrdup(arg, lineno);
else
return (res);
/*
- * We don't know if the target database is using
- * standard_conforming_strings, so we always use E'' strings.
- */
+ * We don't know if the target database is using
+ * standard_conforming_strings, so we always use E'' strings.
+ */
if (strchr(arg, '\\') != NULL)
res[ri++] = ESCAPE_STRING_SYNTAX;
new_entry->isarray = isarray;
new_entry->next = *cache;
*cache = new_entry;
- return(true);
+ return (true);
}
static enum ARRAY_TYPE
#define not_an_array_in_ecpg ECPG_ARRAY_NONE
/* populate cache with well known types to speed things up */
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BOOLOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BYTEAOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CHAROID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), NAMEOID, not_an_array_in_ecpg, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT8OID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT2OID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT2VECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT4OID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), REGPROCOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TEXTOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), OIDOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIDOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), XIDOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIDOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), OIDVECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), POINTOID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), LSEGOID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), PATHOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BOXOID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), POLYGONOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), LINEOID, ECPG_ARRAY_VECTOR, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), FLOAT4OID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), FLOAT8OID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), ABSTIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), RELTIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TINTERVALOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), UNKNOWNOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIRCLEOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CASHOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INETOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIDROID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BPCHAROID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), VARCHAROID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), DATEOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMEOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMESTAMPOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMESTAMPTZOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INTERVALOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMETZOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), ZPBITOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), VARBITOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
- if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), NUMERICOID, ECPG_ARRAY_NONE, stmt->lineno)) return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BOOLOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BYTEAOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CHAROID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), NAMEOID, not_an_array_in_ecpg, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT8OID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT2OID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT2VECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INT4OID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), REGPROCOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TEXTOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), OIDOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIDOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), XIDOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIDOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), OIDVECTOROID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), POINTOID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), LSEGOID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), PATHOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BOXOID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), POLYGONOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), LINEOID, ECPG_ARRAY_VECTOR, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), FLOAT4OID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), FLOAT8OID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), ABSTIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), RELTIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TINTERVALOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), UNKNOWNOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIRCLEOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CASHOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INETOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), CIDROID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), BPCHAROID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), VARCHAROID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), DATEOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMEOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMESTAMPOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMESTAMPTZOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), INTERVALOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), TIMETZOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), ZPBITOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), VARBITOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
+ if (!ECPGtypeinfocache_push(&(stmt->connection->cache_head), NUMERICOID, ECPG_ARRAY_NONE, stmt->lineno))
+ return (ECPG_ARRAY_ERROR);
}
for (cache_entry = (stmt->connection->cache_head); cache_entry != NULL; cache_entry = cache_entry->next)
}
if (**tobeinserted_p == '\0')
{
- int asize = var->arrsize? var->arrsize : 1;
+ int asize = var->arrsize ? var->arrsize : 1;
switch (var->type)
{
case ECPGt_numeric:
{
char *str = NULL;
- int slen;
+ int slen;
numeric *nval;
if (var->arrsize > 1)
nval = PGTYPESnumeric_new();
if (!nval)
return false;
-
+
if (var->type == ECPGt_numeric)
PGTYPESnumeric_copy((numeric *) ((var + var->offset * element)->value), nval);
else
* the first %s
*/
if (!(newcopy = (char *) ECPGalloc(strlen(copiedquery)
- + strlen(tobeinserted)
- + 1, stmt->lineno)))
+ + strlen(tobeinserted)
+ + 1, stmt->lineno)))
{
ECPGfree(copiedquery);
return false;
* in the string
*/
ECPGraise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS,
- ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS,
+ ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS,
NULL);
ECPGfree(copiedquery);
ECPGfree(newcopy);
break;
case PGRES_COPY_OUT:
{
- char *buffer;
- int res;
+ char *buffer;
+ int res;
+
ECPGlog("ECPGexecute line %d: Got PGRES_COPY_OUT\n", stmt->lineno);
while ((res = PQgetCopyData(stmt->connection->connection,
- &buffer, 0)) > 0)
+ &buffer, 0)) > 0)
{
printf("%s", buffer);
PQfreemem(buffer);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.19 2006/08/23 12:01:52 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/extern.h,v 1.20 2006/10/04 00:30:11 momjian Exp $ */
#ifndef _ECPG_LIB_EXTERN_H
#define _ECPG_LIB_EXTERN_H
bool ECPGget_data(const PGresult *, int, int, int, enum ECPGttype type,
enum ECPGttype, char *, char *, long, long, long,
enum ARRAY_TYPE, enum COMPAT_MODE, bool);
+
#ifdef ENABLE_THREAD_SAFETY
void ecpg_pthreads_init(void);
#endif
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.31 2006/08/15 06:40:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.32 2006/10/04 00:30:11 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
if (simple_debug)
{
- int bufsize = strlen(format) + 100;
- char *f = (char *) malloc(bufsize);
+ int bufsize = strlen(format) + 100;
+ char *f = (char *) malloc(bufsize);
if (f == NULL)
{
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.17 2006/09/05 10:00:52 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/prepare.c,v 1.18 2006/10/04 00:30:11 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
*ptr = '?';
for (++ptr; *ptr && isvarchar(*ptr); ptr++)
*ptr = ' ';
- if (*ptr == '\0') /* we reached the end */
- ptr--; /* since we will ptr++ in the top level for loop */
+ if (*ptr == '\0') /* we reached the end */
+ ptr--; /* since we will ptr++ in the top level for
+ * loop */
}
}
}
this->name = ECPGstrdup(name, lineno);
this->stmt = stmt;
ECPGlog("ECPGprepare line %d: QUERY: %s\n", stmt->lineno, stmt->command);
-
+
if (prep_stmts == NULL)
this->next = NULL;
/*
* this is a small part of c.h since we don't want to leak all postgres
* definitions into ecpg programs
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpglib.h,v 1.69 2006/08/04 16:54:05 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpglib.h,v 1.70 2006/10/04 00:30:11 momjian Exp $
*/
#ifndef _ECPGLIB_H
{
#endif
-void ECPGinit_sqlca(struct sqlca_t *sqlca);
+void ECPGinit_sqlca(struct sqlca_t * sqlca);
void ECPGdebug(int, FILE *);
bool ECPGstatus(int, const char *);
bool ECPGsetcommit(int, const char *, const char *);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_date.h,v 1.10 2006/09/14 08:02:38 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_date.h,v 1.11 2006/10/04 00:30:11 momjian Exp $ */
#ifndef PGTYPES_DATETIME
#define PGTYPES_DATETIME
#endif
extern date *PGTYPESdate_new(void);
-extern void PGTYPESdate_free(date *);
+extern void PGTYPESdate_free(date *);
extern date PGTYPESdate_from_asc(char *, char **);
extern char *PGTYPESdate_to_asc(date);
extern date PGTYPESdate_from_timestamp(timestamp);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_interval.h,v 1.12 2006/09/14 08:02:38 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/include/pgtypes_interval.h,v 1.13 2006/10/04 00:30:11 momjian Exp $ */
#ifndef PGTYPES_INTERVAL
#define PGTYPES_INTERVAL
#ifndef HAVE_INT64
typedef long long int int64;
#endif
-#else /* not HAVE_LONG_INT_64 and not
- * HAVE_LONG_LONG_INT_64 */
+#else /* not HAVE_LONG_INT_64 and not
+ * HAVE_LONG_LONG_INT_64 */
/* Won't actually work, but fall back to long int so that code compiles */
#ifndef HAVE_INT64
#endif
#define INT64_IS_BUSTED
-#endif /* not HAVE_LONG_INT_64 and not HAVE_LONG_LONG_INT_64 */
+#endif /* not HAVE_LONG_INT_64 and not
+ * HAVE_LONG_LONG_INT_64 */
#if defined(USE_INTEGER_DATETIMES) && !defined(INT64_IS_BUSTED)
#define HAVE_INT64_TIMESTAMP
#endif
-
-#endif /* C_H */
+#endif /* C_H */
typedef struct
{
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/datetime.c,v 1.31 2006/09/14 08:02:38 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/datetime.c,v 1.32 2006/10/04 00:30:11 momjian Exp $ */
#include "postgres_fe.h"
date *
PGTYPESdate_new(void)
{
- date *result;
+ date *result;
+
result = (date *) pgtypes_alloc(sizeof(date));
/* result can be NULL if we run out of memory */
return result;
}
void
-PGTYPESdate_free(date *d)
+PGTYPESdate_free(date * d)
{
free(d);
}
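
An illustrative sketch of the pgtypeslib date API whose signatures appear in the pgtypes_date.h hunk above; date_demo() and the literal date are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <pgtypes_date.h>

static void
date_demo(void)
{
	char	   *endptr;
	date		d = PGTYPESdate_from_asc("2006-10-04", &endptr);
	char	   *text = PGTYPESdate_to_asc(d);	/* allocated; caller frees */

	printf("parsed date: %s\n", text);
	free(text);
}
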
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/interval.c,v 1.35 2006/09/14 08:02:38 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/interval.c,v 1.36 2006/10/04 00:30:11 momjian Exp $ */
#include "postgres_fe.h"
#include
tm->tm_mday += val * 7;
if (fval != 0)
{
- int extra_days;
+ int extra_days;
+
fval *= 7;
extra_days = (int32) fval;
tm->tm_mday += extra_days;
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
tm->tm_mon += val;
if (fval != 0)
{
- int day;
+ int day;
+
fval *= DAYS_PER_MONTH;
day = fval;
tm->tm_mday += day;
if (fval != 0)
{
int sec;
+
fval *= SECS_PER_DAY;
sec = fval;
tm->tm_sec += sec;
interval *
PGTYPESinterval_new(void)
{
- interval *result;
+ interval *result;
+
result = (interval *) pgtypes_alloc(sizeof(interval));
/* result can be NULL if we run out of memory */
return result;
}
void
-PGTYPESinterval_free(interval *intvl)
+PGTYPESinterval_free(interval * intvl)
{
free(intvl);
}
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/numeric.c,v 1.32 2006/08/15 06:40:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/pgtypeslib/numeric.c,v 1.33 2006/10/04 00:30:12 momjian Exp $ */
#include "postgres_fe.h"
#include
PGTYPESdecimal_new(void)
{
decimal *var;
+
if ((var = (decimal *) pgtypes_alloc(sizeof(decimal))) == NULL)
return NULL;
char *realptr;
char **ptr = (endptr != NULL) ? endptr : &realptr;
- if (!value)
+ if (!value)
return (NULL);
ret = set_var_from_str(str, ptr, value);
char *
PGTYPESnumeric_to_asc(numeric *num, int dscale)
{
- numeric *numcopy = PGTYPESnumeric_new();
- char *s;
+ numeric *numcopy = PGTYPESnumeric_new();
+ char *s;
if (dscale < 0)
dscale = num->dscale;
{
char buffer[100];
numeric *tmp;
- int i;
+ int i;
if (sprintf(buffer, "%f", d) == 0)
return -1;
char *tmp;
double val;
char *endptr;
- numeric *varcopy = PGTYPESnumeric_new();
+ numeric *varcopy = PGTYPESnumeric_new();
if (PGTYPESnumeric_copy(var, varcopy) < 0)
{
int
PGTYPESnumeric_to_long(numeric *nv, long *lp)
{
- char *s = PGTYPESnumeric_to_asc(nv, 0);
- char *endptr;
+ char *s = PGTYPESnumeric_to_asc(nv, 0);
+ char *endptr;
if (s == NULL)
return -1;
replace_type = PGTYPES_TYPE_NOTHING;
switch (*p)
{
- /* the abbreviated name of the day in the week */
- /* XXX should be locale aware */
+ /* the abbreviated name of the day in the week */
+ /* XXX should be locale aware */
case 'a':
replace_val.str_val = pgtypes_date_weekdays_short[dow];
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* the full name of the day in the week */
- /* XXX should be locale aware */
+ /* the full name of the day in the week */
+ /* XXX should be locale aware */
case 'A':
replace_val.str_val = days[dow];
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* the abbreviated name of the month */
- /* XXX should be locale aware */
+ /* the abbreviated name of the month */
+ /* XXX should be locale aware */
case 'b':
case 'h':
replace_val.str_val = months[tm->tm_mon];
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* the full name of the month */
- /* XXX should be locale aware */
+ /* the full name of the month */
+ /* XXX should be locale aware */
case 'B':
replace_val.str_val = pgtypes_date_months[tm->tm_mon];
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* The preferred date and time representation for the
- * current locale. */
+
+ /*
+ * The preferred date and time representation for
+ * the current locale.
+ */
case 'c':
/* XXX */
break;
- /* the century number with leading zeroes */
+ /* the century number with leading zeroes */
case 'C':
replace_val.uint_val = tm->tm_year / 100;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* day with leading zeroes (01 - 31) */
+ /* day with leading zeroes (01 - 31) */
case 'd':
replace_val.uint_val = tm->tm_mday;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* the date in the format mm/dd/yy */
+ /* the date in the format mm/dd/yy */
case 'D':
/*
if (i)
return i;
break;
- /* day with leading spaces (01 - 31) */
+ /* day with leading spaces (01 - 31) */
case 'e':
replace_val.uint_val = tm->tm_mday;
replace_type = PGTYPES_TYPE_UINT_2_LS;
break;
- /*
- * alternative format modifier
- */
+
+ /*
+ * alternative format modifier
+ */
case 'E':
{
char tmp[4] = "%Ex";
if (*p == '\0')
return -1;
tmp[2] = *p;
-
+
/*
* strftime's month is 0 based, ours is 1 based
*/
replace_type = PGTYPES_TYPE_NOTHING;
break;
}
- /*
- * The ISO 8601 year with century as a decimal number. The
- * 4-digit year corresponding to the ISO week number.
- */
+
+ /*
+ * The ISO 8601 year with century as a decimal number. The
+ * 4-digit year corresponding to the ISO week number.
+ */
case 'G':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%G", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /*
- * Like %G, but without century, i.e., with a 2-digit year
- * (00-99).
- */
+
+ /*
+ * Like %G, but without century, i.e., with a 2-digit year
+ * (00-99).
+ */
case 'g':
{
char *fmt = "%g"; /* Keep compiler quiet about
replace_type = PGTYPES_TYPE_NOTHING;
}
break;
- /* hour (24 hour clock) with leading zeroes */
+ /* hour (24 hour clock) with leading zeroes */
case 'H':
replace_val.uint_val = tm->tm_hour;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* hour (12 hour clock) with leading zeroes */
+ /* hour (12 hour clock) with leading zeroes */
case 'I':
replace_val.uint_val = tm->tm_hour % 12;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /*
- * The day of the year as a decimal number with leading zeroes.
- * It ranges from 001 to 366.
- */
+
+ /*
+ * The day of the year as a decimal number with leading
+ * zeroes. It ranges from 001 to 366.
+ */
case 'j':
replace_val.uint_val = tm->tm_yday;
replace_type = PGTYPES_TYPE_UINT_3_LZ;
break;
- /*
- * The hour (24 hour clock). Leading zeroes will be turned into
- * spaces.
- */
+
+ /*
+ * The hour (24 hour clock). Leading zeroes will be turned
+ * into spaces.
+ */
case 'k':
replace_val.uint_val = tm->tm_hour;
replace_type = PGTYPES_TYPE_UINT_2_LS;
break;
- /*
- * The hour (12 hour clock). Leading zeroes will be turned into
- * spaces.
- */
+
+ /*
+ * The hour (12 hour clock). Leading zeroes will be turned
+ * into spaces.
+ */
case 'l':
replace_val.uint_val = tm->tm_hour % 12;
replace_type = PGTYPES_TYPE_UINT_2_LS;
break;
- /* The month as a decimal number with a leading zero */
+ /* The month as a decimal number with a leading zero */
case 'm':
replace_val.uint_val = tm->tm_mon;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* The minute as a decimal number with a leading zero */
+ /* The minute as a decimal number with a leading zero */
case 'M':
replace_val.uint_val = tm->tm_min;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* A newline character */
+ /* A newline character */
case 'n':
replace_val.char_val = '\n';
replace_type = PGTYPES_TYPE_CHAR;
break;
- /* the AM/PM specifier (uppercase) */
- /* XXX should be locale aware */
+ /* the AM/PM specifier (uppercase) */
+ /* XXX should be locale aware */
case 'p':
if (tm->tm_hour < 12)
replace_val.str_val = "AM";
replace_val.str_val = "PM";
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* the AM/PM specifier (lowercase) */
- /* XXX should be locale aware */
+ /* the AM/PM specifier (lowercase) */
+ /* XXX should be locale aware */
case 'P':
if (tm->tm_hour < 12)
replace_val.str_val = "am";
replace_val.str_val = "pm";
replace_type = PGTYPES_TYPE_STRING_CONSTANT;
break;
- /* the time in the format %I:%M:%S %p */
- /* XXX should be locale aware */
+ /* the time in the format %I:%M:%S %p */
+ /* XXX should be locale aware */
case 'r':
i = dttofmtasc_replace(ts, dDate, dow, tm,
q, pstr_len,
if (i)
return i;
break;
- /* The time in 24 hour notation (%H:%M) */
+ /* The time in 24 hour notation (%H:%M) */
case 'R':
i = dttofmtasc_replace(ts, dDate, dow, tm,
q, pstr_len,
if (i)
return i;
break;
- /* The number of seconds since the Epoch (1970-01-01) */
+ /* The number of seconds since the Epoch (1970-01-01) */
case 's':
#ifdef HAVE_INT64_TIMESTAMP
replace_val.int64_val = (*ts - SetEpochTimestamp()) / 1000000.0;
replace_type = PGTYPES_TYPE_DOUBLE_NF;
#endif
break;
- /* seconds as a decimal number with leading zeroes */
+ /* seconds as a decimal number with leading zeroes */
case 'S':
replace_val.uint_val = tm->tm_sec;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* A tabulator */
+ /* A tabulator */
case 't':
replace_val.char_val = '\t';
replace_type = PGTYPES_TYPE_CHAR;
break;
- /* The time in 24 hour notation (%H:%M:%S) */
+ /* The time in 24 hour notation (%H:%M:%S) */
case 'T':
i = dttofmtasc_replace(ts, dDate, dow, tm,
q, pstr_len,
if (i)
return i;
break;
- /* The day of the week as a decimal, Monday = 1, Sunday = 7 */
+
+ /*
+ * The day of the week as a decimal, Monday = 1, Sunday =
+ * 7
+ */
case 'u':
replace_val.uint_val = dow;
if (replace_val.uint_val == 0)
replace_val.uint_val = 7;
replace_type = PGTYPES_TYPE_UINT;
break;
- /* The week number of the year as a decimal number */
+ /* The week number of the year as a decimal number */
case 'U':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%U", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /*
- * The ISO 8601:1988 week number of the current year as a
- * decimal number.
- */
+
+ /*
+ * The ISO 8601:1988 week number of the current year as a
+ * decimal number.
+ */
case 'V':
i = strftime(q, *pstr_len, "%V", tm);
if (i == 0)
}
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /*
- * The day of the week as a decimal, Sunday being 0 and
- * Monday 1.
- */
+
+ /*
+ * The day of the week as a decimal, Sunday being 0 and
+ * Monday 1.
+ */
case 'w':
replace_val.uint_val = dow;
replace_type = PGTYPES_TYPE_UINT;
break;
- /* The week number of the year (another definition) */
+ /* The week number of the year (another definition) */
case 'W':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%U", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /*
- * The preferred date representation for the current locale
- * without the time.
- */
+
+ /*
+ * The preferred date representation for the current
+ * locale without the time.
+ */
case 'x':
{
char *fmt = "%x"; /* Keep compiler quiet about
replace_type = PGTYPES_TYPE_NOTHING;
}
break;
- /*
- * The preferred time representation for the current locale
- * without the date.
- */
+
+ /*
+ * The preferred time representation for the current
+ * locale without the date.
+ */
case 'X':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%X", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /* The year without the century (2 digits, leading zeroes) */
+ /* The year without the century (2 digits, leading zeroes) */
case 'y':
replace_val.uint_val = tm->tm_year % 100;
replace_type = PGTYPES_TYPE_UINT_2_LZ;
break;
- /* The year with the century (4 digits) */
+ /* The year with the century (4 digits) */
case 'Y':
replace_val.uint_val = tm->tm_year;
replace_type = PGTYPES_TYPE_UINT;
break;
- /* The time zone offset from GMT */
+ /* The time zone offset from GMT */
case 'z':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%z", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /* The name or abbreviation of the time zone */
+ /* The name or abbreviation of the time zone */
case 'Z':
tm->tm_mon -= 1;
i = strftime(q, *pstr_len, "%Z", tm);
tm->tm_mon += 1;
replace_type = PGTYPES_TYPE_NOTHING;
break;
- /* A % sign */
+ /* A % sign */
case '%':
replace_val.char_val = '%';
replace_type = PGTYPES_TYPE_CHAR;
break;
case '\0':
/* fmtstr: foo%' - The string ends with a % sign */
+
/*
* this is not compliant to the specification
*/
return -1;
default:
+
/*
* if we don't know the pattern, we just copy it
*/
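
The long switch above is the format-specifier replacement loop used when formatting timestamps; each reindented comment documents one %-specifier. A hedged sketch of driving it from user code, assuming PGTYPEStimestamp_from_asc() and PGTYPEStimestamp_fmt_asc() from pgtypes_timestamp.h with the usual signatures (neither function header is part of this diff, and the 0-on-success return is an assumption):

/* Sketch only: %a, %d, %B, %Y, %H and %M are all cases handled in the
 * switch above. */
#include <stdio.h>
#include <pgtypes_timestamp.h>

int
main(void)
{
    timestamp   ts = PGTYPEStimestamp_from_asc("2003-05-07 13:28:34", NULL);
    char        out[64];

    if (PGTYPEStimestamp_fmt_asc(&ts, out, sizeof(out), "%a %d %B %Y, %H:%M") == 0)
        printf("%s\n", out);
    return 0;
}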
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/output.c,v 1.18 2006/08/15 06:40:19 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/output.c,v 1.19 2006/10/04 00:30:12 momjian Exp $ */
#include "postgres_fe.h"
void
output_line_number(void)
{
- char *line = hashline_number();
-
+ char *line = hashline_number();
+
/* output_escaped_str(line); */
fprintf(yyout, "%s", line);
free(line);
static void
output_escaped_str(char *str)
{
- int i, len = strlen(str);
+ int i,
+ len = strlen(str);
/* output this char by char as we have to filter " and \n */
for (i = 0; i < len; i++)
fputs("\\\"", yyout);
else if (str[i] == '\n')
fputs("\\\n", yyout);
- else if (str[i] == '\r' && str[i+1] == '\n')
+ else if (str[i] == '\r' && str[i + 1] == '\n')
{
fputs("\\\r\n", yyout);
i++;
#include
#include
-int main(void)
+int
+main(void)
{
- char t1[] = "abc def ghi ";
- /* 123456789012345 */
- char buf[50];
- int k;
+ char t1[] = "abc def ghi ";
+
+ /* 123456789012345 */
+ char buf[50];
+ int k;
printf("t1: _%s_\n", t1);
rupshift(t1);
ECPG_INFORMIX_BAD_EXPONENT ?
*/
-char* decs[] = { "2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4",
- "2E-394", ".1E-2", "+.0", "-592.49E-07", "+32.84e-4",
- ".500001", "-.5000001",
- "1234567890123456789012345678.91", /* 30 digits should fit
- into decimal */
- "1234567890123456789012345678.921", /* 31 digits should NOT
- fit into decimal */
- "not a number",
- NULL};
+char *decs[] = {"2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4",
+ "2E-394", ".1E-2", "+.0", "-592.49E-07", "+32.84e-4",
+ ".500001", "-.5000001",
+ "1234567890123456789012345678.91", /* 30 digits should fit into decimal */
+ "1234567890123456789012345678.921", /* 31 digits should NOT fit into
+ * decimal */
+ "not a number",
+NULL};
static void
-check_errno(void);
+ check_errno(void);
#define BUFSIZE 200
int
main(void)
{
- decimal *dec, *din;
- char buf[BUFSIZE];
- long l;
- int i, j, k, q, r, count = 0;
- double dbl;
- decimal **decarr = (decimal **) calloc(1, sizeof(decimal));
+ decimal *dec,
+ *din;
+ char buf[BUFSIZE];
+ long l;
+ int i,
+ j,
+ k,
+ q,
+ r,
+ count = 0;
+ double dbl;
+ decimal **decarr = (decimal **) calloc(1, sizeof(decimal));
ECPGdebug(1, stderr);
decarr = realloc(decarr, sizeof(decimal *) * (count + 1));
decarr[count++] = dec;
- r = dectoasc(dec, buf, BUFSIZE-1, -1);
- if (r < 0) check_errno();
+ r = dectoasc(dec, buf, BUFSIZE - 1, -1);
+ if (r < 0)
+ check_errno();
printf("dec[%d,1]: r: %d, %s\n", i, r, buf);
- r = dectoasc(dec, buf, BUFSIZE-1, 0);
- if (r < 0) check_errno();
+ r = dectoasc(dec, buf, BUFSIZE - 1, 0);
+ if (r < 0)
+ check_errno();
printf("dec[%d,2]: r: %d, %s\n", i, r, buf);
- r = dectoasc(dec, buf, BUFSIZE-1, 1);
- if (r < 0) check_errno();
+ r = dectoasc(dec, buf, BUFSIZE - 1, 1);
+ if (r < 0)
+ check_errno();
printf("dec[%d,3]: r: %d, %s\n", i, r, buf);
- r = dectoasc(dec, buf, BUFSIZE-1, 2);
- if (r < 0) check_errno();
+ r = dectoasc(dec, buf, BUFSIZE - 1, 2);
+ if (r < 0)
+ check_errno();
printf("dec[%d,4]: r: %d, %s\n", i, r, buf);
din = PGTYPESdecimal_new();
- r = dectoasc(din, buf, BUFSIZE-1, 2);
- if (r < 0) check_errno();
+ r = dectoasc(din, buf, BUFSIZE - 1, 2);
+ if (r < 0)
+ check_errno();
printf("dec[%d,5]: r: %d, %s\n", i, r, buf);
r = dectolong(dec, &l);
- if (r) check_errno();
- printf("dec[%d,6]: %ld (r: %d)\n", i, r?0L:l, r);
+ if (r)
+ check_errno();
+ printf("dec[%d,6]: %ld (r: %d)\n", i, r ? 0L : l, r);
if (r == 0)
{
r = deccvlong(l, din);
- if (r) check_errno();
- dectoasc(din, buf, BUFSIZE-1, 2);
+ if (r)
+ check_errno();
+ dectoasc(din, buf, BUFSIZE - 1, 2);
q = deccmp(dec, din);
printf("dec[%d,7]: %s (r: %d - cmp: %d)\n", i, buf, r, q);
}
r = dectoint(dec, &k);
- if (r) check_errno();
- printf("dec[%d,8]: %d (r: %d)\n", i, r?0:k, r);
+ if (r)
+ check_errno();
+ printf("dec[%d,8]: %d (r: %d)\n", i, r ? 0 : k, r);
if (r == 0)
{
r = deccvint(k, din);
- if (r) check_errno();
- dectoasc(din, buf, BUFSIZE-1, 2);
+ if (r)
+ check_errno();
+ dectoasc(din, buf, BUFSIZE - 1, 2);
q = deccmp(dec, din);
printf("dec[%d,9]: %s (r: %d - cmp: %d)\n", i, buf, r, q);
}
r = dectodbl(dec, &dbl);
- if (r) check_errno();
- printf("dec[%d,10]: %g (r: %d)\n", i, r?0.0:dbl, r);
+ if (r)
+ check_errno();
+ printf("dec[%d,10]: %g (r: %d)\n", i, r ? 0.0 : dbl, r);
PGTYPESdecimal_free(din);
printf("\n");
decarr = realloc(decarr, sizeof(decimal *) * (count + 1));
decarr[count++] = dec;
- rsetnull(CDECIMALTYPE, (char *) decarr[count-1]);
- printf("dec[%d]: %sNULL\n", count-1,
- risnull(CDECIMALTYPE, (char *) decarr[count-1]) ? "" : "NOT ");
+ rsetnull(CDECIMALTYPE, (char *) decarr[count - 1]);
+ printf("dec[%d]: %sNULL\n", count - 1,
+ risnull(CDECIMALTYPE, (char *) decarr[count - 1]) ? "" : "NOT ");
printf("dec[0]: %sNULL\n",
- risnull(CDECIMALTYPE, (char *) decarr[0]) ? "" : "NOT ");
+ risnull(CDECIMALTYPE, (char *) decarr[0]) ? "" : "NOT ");
r = dectoasc(decarr[3], buf, -1, -1);
- check_errno(); printf("dectoasc with len == -1: r: %d\n", r);
+ check_errno();
+ printf("dectoasc with len == -1: r: %d\n", r);
r = dectoasc(decarr[3], buf, 0, -1);
- check_errno(); printf("dectoasc with len == 0: r: %d\n", r);
+ check_errno();
+ printf("dectoasc with len == 0: r: %d\n", r);
for (i = 0; i < count; i++)
{
for (j = 0; j < count; j++)
{
- decimal a, s, m, d;
- int c;
+ decimal a,
+ s,
+ m,
+ d;
+ int c;
+
c = deccmp(decarr[i], decarr[j]);
printf("dec[c,%d,%d]: %d\n", i, j, c);
}
else
{
- dectoasc(&a, buf, BUFSIZE-1, -1);
+ dectoasc(&a, buf, BUFSIZE - 1, -1);
printf("dec[a,%d,%d]: %s\n", i, j, buf);
}
}
else
{
- dectoasc(&s, buf, BUFSIZE-1, -1);
+ dectoasc(&s, buf, BUFSIZE - 1, -1);
printf("dec[s,%d,%d]: %s\n", i, j, buf);
}
}
else
{
- dectoasc(&m, buf, BUFSIZE-1, -1);
+ dectoasc(&m, buf, BUFSIZE - 1, -1);
printf("dec[m,%d,%d]: %s\n", i, j, buf);
}
}
else
{
- dectoasc(&d, buf, BUFSIZE-1, -1);
+ dectoasc(&d, buf, BUFSIZE - 1, -1);
printf("dec[d,%d,%d]: %s\n", i, j, buf);
}
}
for (i = 0; i < count; i++)
{
- dectoasc(decarr[i], buf, BUFSIZE-1, -1);
+ dectoasc(decarr[i], buf, BUFSIZE - 1, -1);
printf("%d: %s\n", i, buf);
}
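
The dec test above relies on the Informix-compatibility convention that the dec* functions return a negative value on failure and leave the detail in errno; the reformatting merely splits the one-line "if (r < 0) check_errno();" tests onto two lines. A minimal sketch of that convention in isolation, assuming <decimal.h> supplies the dec* prototypes and the decimal type as in the compat library:

/* Sketch only: same return-value/errno pattern that check_errno() in the
 * test decodes; deccvint() and dectoasc() both appear in the diff above. */
#include <stdio.h>
#include <errno.h>
#include <decimal.h>

static void
report(int r)
{
    if (r < 0)                  /* on failure errno carries the detail */
        printf("failed, errno = %d\n", errno);
}

int
main(void)
{
    decimal     d;
    char        buf[64];

    deccvint(7, &d);
    report(dectoasc(&d, buf, sizeof(buf) - 1, 2));
    printf("%s\n", buf);        /* "7.00" with two decimal places */
    return 0;
}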
static void
check_errno(void)
{
- switch(errno)
+ switch (errno)
{
case 0:
printf("(no errno set) - ");
static void
-check_return(int ret);
+ check_return(int ret);
static void
date_test_strdate(char *input)
{
- static int i;
- date d;
- int r, q;
- char dbuf[11];
+ static int i;
+ date d;
+ int r,
+ q;
+ char dbuf[11];
r = rstrdate(input, &d);
printf("r: %d ", r);
static void
date_test_defmt(char *fmt, char *input)
{
- static int i;
- char dbuf[11];
- date d;
- int q, r;
+ static int i;
+ char dbuf[11];
+ date d;
+ int q,
+ r;
r = rdefmtdate(&d, fmt, input);
printf("r: %d ", r);
static void
date_test_fmt(date d, char *fmt)
{
- static int i;
- char buf[200];
- int r;
+ static int i;
+ char buf[200];
+ int r;
r = rfmtdate(d, fmt, buf);
printf("r: %d ", r);
int
main(void)
{
- short mdy[3] = { 11, 23, 1959 };
- char dbuf[11];
- date d;
- int r;
+ short mdy[3] = {11, 23, 1959};
+ char dbuf[11];
+ date d;
+ int r;
ECPGdebug(1, stderr);
static void
check_return(int ret)
{
- switch(ret)
+ switch (ret)
{
case ECPG_INFORMIX_ENOTDMY:
printf("(ECPG_INFORMIX_ENOTDMY)");
static void
-check_return(int ret);
+ check_return(int ret);
static void
fmtlong(long lng, char *fmt)
{
- static int i;
- int r;
- char buf[30];
+ static int i;
+ int r;
+ char buf[30];
r = rfmtlong(lng, fmt, buf);
printf("r: %d ", r);
static void
check_return(int ret)
{
- switch(ret)
+ switch (ret)
{
case ECPG_INFORMIX_ENOTDMY:
printf("(ECPG_INFORMIX_ENOTDMY)");
printf("null: %d\n", risnull(type, ptr));
}
-int main(void)
+int
+main(void)
{
-
+
#line 15 "rnull.pgc"
- char c [] = "abc " ;
+ char c[] = "abc ";
#line 15 "rnull.pgc"
-
+
#line 16 "rnull.pgc"
- short s = 17 ;
+ short s = 17;
#line 16 "rnull.pgc"
-
+
#line 17 "rnull.pgc"
- int i = - 74874 ;
+ int i = -74874;
#line 17 "rnull.pgc"
-
+
#line 18 "rnull.pgc"
- bool b = 1 ;
+ bool b = 1;
#line 18 "rnull.pgc"
-
+
#line 19 "rnull.pgc"
- float f = 3.71 ;
+ float f = 3.71;
#line 19 "rnull.pgc"
-
+
#line 20 "rnull.pgc"
- long l = 487444 ;
+ long l = 487444;
#line 20 "rnull.pgc"
-
+
#line 21 "rnull.pgc"
- double dbl = 404.404 ;
+ double dbl = 404.404;
#line 21 "rnull.pgc"
-
+
#line 22 "rnull.pgc"
- decimal dec ;
+ decimal dec;
#line 22 "rnull.pgc"
-
+
#line 23 "rnull.pgc"
- date dat ;
+ date dat;
#line 23 "rnull.pgc"
-
+
#line 24 "rnull.pgc"
- timestamp tmp ;
+ timestamp tmp;
#line 24 "rnull.pgc"
#line 27 "rnull.pgc"
- { ECPGconnect(__LINE__, 1, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 1, "regress1", NULL, NULL, NULL, 0);
#line 29 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "rnull.pgc"
- { ECPGdo(__LINE__, 1, 0, NULL, "create table test ( id int , c char ( 10 ) , s smallint , i int , b bool , f float , l bigint , dbl double precision , dec decimal , dat date , tmp timestamptz ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "create table test ( id int , c char ( 10 ) , s smallint , i int , b bool , f float , l bigint , dbl double precision , dec decimal , dat date , tmp timestamptz ) ", ECPGt_EOIT, ECPGt_EORT);
#line 33 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "rnull.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 34 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "rnull.pgc"
- { ECPGdo(__LINE__, 1, 0, NULL, "insert into test ( id , c , s , i , b , f , l , dbl ) values( 1 , ? , ? , ? , ? , ? , ? , ? ) ",
- ECPGt_char,(c),(long)sizeof("abc "),(long)1,(sizeof("abc "))*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(s),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_bool,&(b),(long)1,(long)1,sizeof(bool),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_float,&(f),(long)1,(long)1,sizeof(float),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_long,&(l),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_double,&(dbl),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "insert into test ( id , c , s , i , b , f , l , dbl ) values( 1 , ? , ? , ? , ? , ? , ? , ? ) ",
+ ECPGt_char, (c), (long) sizeof("abc "), (long) 1, (sizeof("abc ")) *sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(s), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_bool, &(b), (long) 1, (long) 1, sizeof(bool),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_float, &(f), (long) 1, (long) 1, sizeof(float),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_long, &(l), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_double, &(dbl), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 38 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 38 "rnull.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 39 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "rnull.pgc"
rsetnull(CDATETYPE, (char *) &dat);
rsetnull(CDTIMETYPE, (char *) &tmp);
- { ECPGdo(__LINE__, 1, 0, NULL, "insert into test ( id , c , s , i , b , f , l , dbl , dec , dat , tmp ) values( 2 , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) ",
- ECPGt_char,(c),(long)sizeof("abc "),(long)1,(sizeof("abc "))*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(s),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_bool,&(b),(long)1,(long)1,sizeof(bool),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_float,&(f),(long)1,(long)1,sizeof(float),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_long,&(l),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_double,&(dbl),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_decimal,&(dec),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_date,&(dat),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(tmp),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "insert into test ( id , c , s , i , b , f , l , dbl , dec , dat , tmp ) values( 2 , ? , ? , ? , ? , ? , ? , ? , ? , ? , ? ) ",
+ ECPGt_char, (c), (long) sizeof("abc "), (long) 1, (sizeof("abc ")) *sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(s), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_bool, &(b), (long) 1, (long) 1, sizeof(bool),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_float, &(f), (long) 1, (long) 1, sizeof(float),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_long, &(l), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_double, &(dbl), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_decimal, &(dec), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_date, &(dat), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(tmp), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 54 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 54 "rnull.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 55 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 55 "rnull.pgc"
printf("first select\n");
- { ECPGdo(__LINE__, 1, 0, NULL, "select c , s , i , b , f , l , dbl , dec , dat , tmp from test where id = 1 ", ECPGt_EOIT,
- ECPGt_char,(c),(long)sizeof("abc "),(long)1,(sizeof("abc "))*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(s),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_bool,&(b),(long)1,(long)1,sizeof(bool),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_float,&(f),(long)1,(long)1,sizeof(float),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_long,&(l),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_double,&(dbl),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_decimal,&(dec),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_date,&(dat),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(tmp),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "select c , s , i , b , f , l , dbl , dec , dat , tmp from test where id = 1 ", ECPGt_EOIT,
+ ECPGt_char, (c), (long) sizeof("abc "), (long) 1, (sizeof("abc ")) *sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(s), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_bool, &(b), (long) 1, (long) 1, sizeof(bool),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_float, &(f), (long) 1, (long) 1, sizeof(float),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_long, &(l), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_double, &(dbl), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_decimal, &(dec), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_date, &(dat), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(tmp), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 61 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 61 "rnull.pgc"
printf("second select\n");
- { ECPGdo(__LINE__, 1, 0, NULL, "select c , s , i , b , f , l , dbl , dec , dat , tmp from test where id = 2 ", ECPGt_EOIT,
- ECPGt_char,(c),(long)sizeof("abc "),(long)1,(sizeof("abc "))*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(s),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_bool,&(b),(long)1,(long)1,sizeof(bool),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_float,&(f),(long)1,(long)1,sizeof(float),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_long,&(l),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_double,&(dbl),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_decimal,&(dec),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_date,&(dat),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(tmp),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "select c , s , i , b , f , l , dbl , dec , dat , tmp from test where id = 2 ", ECPGt_EOIT,
+ ECPGt_char, (c), (long) sizeof("abc "), (long) 1, (sizeof("abc ")) *sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(s), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_bool, &(b), (long) 1, (long) 1, sizeof(bool),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_float, &(f), (long) 1, (long) 1, sizeof(float),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_long, &(l), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_double, &(dbl), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_decimal, &(dec), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_date, &(dat), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(tmp), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 78 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 78 "rnull.pgc"
test_null(CDATETYPE, (char *) &dat);
test_null(CDTIMETYPE, (char *) &tmp);
- { ECPGdo(__LINE__, 1, 0, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 0, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
#line 91 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 91 "rnull.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 92 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 92 "rnull.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 94 "rnull.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 94 "rnull.pgc"
static void openit(void);
-static void dosqlprint(void) {
+static void
+dosqlprint(void)
+{
printf("doSQLprint: Error: %s\n", sqlca.sqlerrm.sqlerrmc);
}
-int main(void)
+int
+main(void)
{
-
+
#line 14 "test_informix.pgc"
- int i = 14 ;
+ int i = 14;
#line 14 "test_informix.pgc"
-
-
+
+
#line 15 "test_informix.pgc"
- decimal j , m , n ;
+ decimal j,
+ m,
+ n;
#line 15 "test_informix.pgc"
ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do dosqlprint ( ) ; */
+ /* exec sql whenever sqlerror do dosqlprint ( ) ; */
#line 18 "test_informix.pgc"
- { ECPGconnect(__LINE__, 1, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 1, "regress1", NULL, NULL, NULL, 0);
#line 20 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 20 "test_informix.pgc"
- if (sqlca.sqlcode != 0) exit(1);
+ if (sqlca.sqlcode != 0)
+ exit(1);
- { ECPGdo(__LINE__, 1, 1, NULL, "create table test ( i int primary key , j int ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "create table test ( i int primary key , j int ) ", ECPGt_EOIT, ECPGt_EORT);
#line 23 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 23 "test_informix.pgc"
/* this INSERT works */
- rsetnull(CDECIMALTYPE, (char *)&j);
- { ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( 7 , ? ) ",
- ECPGt_decimal,&(j),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ rsetnull(CDECIMALTYPE, (char *) &j);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( 7 , ? ) ",
+ ECPGt_decimal, &(j), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 27 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 27 "test_informix.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 28 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 28 "test_informix.pgc"
/* this INSERT should fail because i is a unique column */
- { ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( 7 , 12 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( 7 , 12 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 31 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 31 "test_informix.pgc"
printf("INSERT: %ld=%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- if (sqlca.sqlcode != 0) { ECPGtrans(__LINE__, NULL, "rollback");
+ if (sqlca.sqlcode != 0)
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 33 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 33 "test_informix.pgc"
- { ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( ? , 1 ) ",
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "insert into test ( i , j ) values( ? , 1 ) ",
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 35 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 35 "test_informix.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 36 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 36 "test_informix.pgc"
/* this will fail (more than one row in subquery) */
- { ECPGdo(__LINE__, 1, 1, NULL, "select i from test where j = ( select j from test ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select i from test where j = ( select j from test ) ", ECPGt_EOIT, ECPGt_EORT);
#line 39 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 39 "test_informix.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 40 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 40 "test_informix.pgc"
/* this however should be ok */
- { ECPGdo(__LINE__, 1, 1, NULL, "select i from test where j = ( select j from test order by i limit 1 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select i from test where j = ( select j from test order by i limit 1 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 43 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 43 "test_informix.pgc"
printf("SELECT: %ld=%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- if (sqlca.sqlcode != 0) { ECPGtrans(__LINE__, NULL, "rollback");
+ if (sqlca.sqlcode != 0)
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 45 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 45 "test_informix.pgc"
- ECPG_informix_set_var( 0, &( i ), __LINE__);\
- /* declare c cursor for select * from test where i <= ? */
+ ECPG_informix_set_var(0, &(i), __LINE__);
+ \
+ /* declare c cursor for select * from test where i <= ? */
#line 47 "test_informix.pgc"
- openit();
+ openit();
deccvint(0, &j);
while (1)
{
- { ECPGdo(__LINE__, 1, 1, NULL, "fetch forward from c", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_decimal,&(j),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "fetch forward from c", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_decimal, &(j), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 54 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 54 "test_informix.pgc"
- if (sqlca.sqlcode == 100) break;
- else if (sqlca.sqlcode != 0) printf ("Error: %ld\n", sqlca.sqlcode);
+ if (sqlca.sqlcode == 100)
+ break;
+ else if (sqlca.sqlcode != 0)
+ printf("Error: %ld\n", sqlca.sqlcode);
- if (risnull(CDECIMALTYPE, (char *)&j))
+ if (risnull(CDECIMALTYPE, (char *) &j))
printf("%d NULL\n", i);
else
{
- int a;
+ int a;
dectoint(&j, &a);
printf("%d %d\n", i, a);
deccvint(7, &j);
deccvint(14, &m);
decadd(&j, &m, &n);
- { ECPGdo(__LINE__, 1, 1, NULL, "delete from test where i = ? ",
- ECPGt_decimal,&(n),(long)1,(long)1,sizeof(decimal),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "delete from test where i = ? ",
+ ECPGt_decimal, &(n), (long) 1, (long) 1, sizeof(decimal),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 72 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 72 "test_informix.pgc"
printf("DELETE: %ld\n", sqlca.sqlcode);
- { ECPGdo(__LINE__, 1, 1, NULL, "select 1 from test where i = 14 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select 1 from test where i = 14 ", ECPGt_EOIT, ECPGt_EORT);
#line 75 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 75 "test_informix.pgc"
printf("Exists: %ld\n", sqlca.sqlcode);
- { ECPGdo(__LINE__, 1, 1, NULL, "select 1 from test where i = 147 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select 1 from test where i = 147 ", ECPGt_EOIT, ECPGt_EORT);
#line 78 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 78 "test_informix.pgc"
printf("Does not exist: %ld\n", sqlca.sqlcode);
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 81 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 81 "test_informix.pgc"
- { ECPGdo(__LINE__, 1, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
#line 82 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 82 "test_informix.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 83 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 83 "test_informix.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 85 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 85 "test_informix.pgc"
return 0;
}
-static void openit(void)
+static void
+openit(void)
{
- { ECPGdo(__LINE__, 1, 1, NULL, "declare c cursor for select * from test where i <= ? ",
- ECPGt_int,&(*( int *)(ECPG_informix_get_var( 0))),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "declare c cursor for select * from test where i <= ? ",
+ ECPGt_int, &(*(int *) (ECPG_informix_get_var(0))), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 92 "test_informix.pgc"
-if (sqlca.sqlcode < 0) dosqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ dosqlprint();
+ }
#line 92 "test_informix.pgc"
}
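
In test_informix.pgc the cursor is declared with a host variable in main() but the DECLARE is only sent from openit(), so the preprocessor registers the variable's address in a numbered slot with ECPG_informix_set_var() and fetches it back with ECPG_informix_get_var(0). A hypothetical miniature of that hand-off (the real ecpglib implementation is not part of this diff; slot 0 and the int type match the generated calls above):

/* Hypothetical stand-in for ecpglib's variable slots, for illustration only. */
#include <stdio.h>

static void *informix_var[8];

static void set_var(int n, void *p) { informix_var[n] = p; }
static void *get_var(int n)         { return informix_var[n]; }

static void
openit(void)
{
    /* same pattern as ECPGdo(... ECPG_informix_get_var(0) ...) above */
    printf("declare cursor with i <= %d\n", *(int *) get_var(0));
}

int
main(void)
{
    int     i = 14;

    set_var(0, &i);             /* mirrors ECPG_informix_set_var(0, &(i), __LINE__) */
    openit();
    return 0;
}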
-
#ifdef __cplusplus
}
#endif
-
#endif
#line 5 "test_informix2.pgc"
/* Check SQLCODE, and produce a "standard error" if it's wrong! */
-static void sql_check(char *fn, char *caller, int ignore)
+static void
+sql_check(char *fn, char *caller, int ignore)
{
- char errorstring[255];
-
- if (SQLCODE == ignore)
- return;
- else
- {
- if (SQLCODE != 0)
- {
-
- sprintf(errorstring, "**SQL error %ld doing '%s' in function '%s'. [%s]",
- SQLCODE, caller, fn, sqlca.sqlerrm.sqlerrmc);
- fprintf(stderr, "%s", errorstring);
- printf("%s\n", errorstring);
-
- /* attempt a ROLLBACK */
- { ECPGtrans(__LINE__, NULL, "rollback");}
+ char errorstring[255];
+
+ if (SQLCODE == ignore)
+ return;
+ else
+ {
+ if (SQLCODE != 0)
+ {
+
+ sprintf(errorstring, "**SQL error %ld doing '%s' in function '%s'. [%s]",
+ SQLCODE, caller, fn, sqlca.sqlerrm.sqlerrmc);
+ fprintf(stderr, "%s", errorstring);
+ printf("%s\n", errorstring);
+
+ /* attempt a ROLLBACK */
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
+ }
#line 27 "test_informix2.pgc"
- if (SQLCODE == 0)
- {
- sprintf(errorstring, "Rollback successful.\n");
- } else {
- sprintf(errorstring, "Rollback failed with code %ld.\n", SQLCODE);
- }
+ if (SQLCODE == 0)
+ {
+ sprintf(errorstring, "Rollback successful.\n");
+ }
+ else
+ {
+ sprintf(errorstring, "Rollback failed with code %ld.\n", SQLCODE);
+ }
- fprintf(stderr, "%s", errorstring);
- printf("%s\n", errorstring);
+ fprintf(stderr, "%s", errorstring);
+ printf("%s\n", errorstring);
- exit(1);
- }
- }
+ exit(1);
+ }
+ }
}
-int main(void)
+int
+main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
-
+
+
+
+
+
+
#line 49 "test_informix2.pgc"
- int c ;
-
+ int c;
+
#line 50 "test_informix2.pgc"
- timestamp d ;
-
+ timestamp d;
+
#line 51 "test_informix2.pgc"
- timestamp e ;
-
+ timestamp e;
+
#line 52 "test_informix2.pgc"
- timestamp maxd ;
-
+ timestamp maxd;
+
#line 53 "test_informix2.pgc"
- char dbname [ 30 ] ;
+ char dbname[30];
+
/* exec sql end declare section */
#line 54 "test_informix2.pgc"
- interval *intvl;
+ interval *intvl;
/* exec sql whenever sqlerror sqlprint ; */
#line 58 "test_informix2.pgc"
ECPGdebug(1, stderr);
strcpy(dbname, "regress1");
- { ECPGconnect(__LINE__, 1, dbname , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 1, dbname, NULL, NULL, NULL, 0);
#line 63 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 63 "test_informix2.pgc"
sql_check("main", "connect", 0);
- { ECPGdo(__LINE__, 1, 1, NULL, "set DateStyle to 'DMY'", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "set DateStyle to 'DMY'", ECPGt_EOIT, ECPGt_EORT);
#line 66 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 66 "test_informix2.pgc"
- { ECPGdo(__LINE__, 1, 1, NULL, "create table history ( customerid integer , timestamp timestamp without time zone , action_taken char ( 5 ) , narrative varchar ( 100 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "create table history ( customerid integer , timestamp timestamp without time zone , action_taken char ( 5 ) , narrative varchar ( 100 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 68 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 68 "test_informix2.pgc"
sql_check("main", "create", 0);
-
- { ECPGdo(__LINE__, 1, 1, NULL, "insert into history ( customerid , timestamp , action_taken , narrative ) values( 1 , '2003-05-07 13:28:34 CEST' , 'test' , 'test' ) ", ECPGt_EOIT, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "insert into history ( customerid , timestamp , action_taken , narrative ) values( 1 , '2003-05-07 13:28:34 CEST' , 'test' , 'test' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 73 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 73 "test_informix2.pgc"
sql_check("main", "insert", 0);
- { ECPGdo(__LINE__, 1, 1, NULL, "select max ( timestamp ) from history ", ECPGt_EOIT,
- ECPGt_timestamp,&(maxd),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select max ( timestamp ) from history ", ECPGt_EOIT,
+ ECPGt_timestamp, &(maxd), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 78 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 78 "test_informix2.pgc"
sql_check("main", "select max", 100);
- { ECPGdo(__LINE__, 1, 1, NULL, "select customerid , timestamp from history where timestamp = ? limit 1 ",
- ECPGt_timestamp,&(maxd),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_int,&(c),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(d),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "select customerid , timestamp from history where timestamp = ? limit 1 ",
+ ECPGt_timestamp, &(maxd), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_int, &(c), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(d), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 85 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 85 "test_informix2.pgc"
sql_check("main", "select", 0);
c++;
- { ECPGdo(__LINE__, 1, 1, NULL, "insert into history ( customerid , timestamp , action_taken , narrative ) values( ? , ? , 'test' , 'test' ) ",
- ECPGt_int,&(c),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(e),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "insert into history ( customerid , timestamp , action_taken , narrative ) values( ? , ? , 'test' , 'test' ) ",
+ ECPGt_int, &(c), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(e), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 97 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 97 "test_informix2.pgc"
sql_check("main", "update", 0);
-
- { ECPGtrans(__LINE__, NULL, "commit");
+
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 100 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 100 "test_informix2.pgc"
- { ECPGdo(__LINE__, 1, 1, NULL, "drop table history ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 1, 1, NULL, "drop table history ", ECPGt_EOIT, ECPGt_EORT);
#line 102 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 102 "test_informix2.pgc"
sql_check("main", "drop", 0);
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 105 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 105 "test_informix2.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 107 "test_informix2.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 107 "test_informix2.pgc"
sql_check("main", "disconnect", 0);
exit(0);
/*
- Table "public.history"
- Column | Type | Modifiers
+ Table "public.history"
+ Column | Type | Modifiers
--------------+-----------------------------+-----------
- customerid | integer | not null
- timestamp | timestamp without time zone | not null
- action_taken | character(5) | not null
- narrative | character varying(100) |
+ customerid | integer | not null
+ timestamp | timestamp without time zone | not null
+ action_taken | character(5) | not null
+ narrative | character varying(100) |
*/
}
main(void)
{
/* exec sql begin declare section */
-
-
+
+
#line 16 "test1.pgc"
- char db [ 200 ] ;
-
+ char db[200];
+
#line 17 "test1.pgc"
- char pw [ 200 ] ;
+ char pw[200];
+
/* exec sql end declare section */
#line 18 "test1.pgc"
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 22 "test1.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "alter user connectuser encrypted password 'connectpw'", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "alter user connectuser encrypted password 'connectpw'", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 23 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 24 "test1.pgc"
- /* <-- "main" not specified */
+ /* <-- "main" not specified */
- { ECPGconnect(__LINE__, 0, "connectdb@localhost" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb@localhost", NULL, NULL, "main", 0);
+ }
#line 26 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 27 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "@localhost" , "connectdb" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "@localhost", "connectdb", NULL, "main", 0);
+ }
#line 29 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 30 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb@localhost:55432" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb@localhost:55432", NULL, NULL, "main", 0);
+ }
#line 32 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 33 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "@localhost:55432" , "connectdb" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "@localhost:55432", "connectdb", NULL, "main", 0);
+ }
#line 35 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 36 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb:55432" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb:55432", NULL, NULL, "main", 0);
+ }
#line 38 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 39 "test1.pgc"
- { ECPGconnect(__LINE__, 0, ":55432" , "connectdb" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, ":55432", "connectdb", NULL, "main", 0);
+ }
#line 41 "test1.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 42 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/connectdb" , "connectuser" , "connectpw" , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/connectdb", "connectuser", "connectpw", NULL, 0);
+ }
#line 44 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 45 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/" , "connectdb" , NULL , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/", "connectdb", NULL, NULL, 0);
+ }
#line 47 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 48 "test1.pgc"
strcpy(pw, "connectpw");
strcpy(db, "tcp:postgresql://localhost:55432/connectdb");
- { ECPGconnect(__LINE__, 0, db , "connectuser" , pw , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, db, "connectuser", pw, NULL, 0);
+ }
#line 52 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 53 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb" , "connectuser" , "connectpw" , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb", "connectuser", "connectpw", NULL, 0);
+ }
#line 55 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 56 "test1.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb" , "connectuser" , NULL , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb", "connectuser", NULL, NULL, 0);
+ }
#line 58 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 59 "test1.pgc"
/* wrong db */
- { ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/nonexistant" , "connectuser" , "connectpw" , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/nonexistant", "connectuser", "connectpw", NULL, 0);
+ }
#line 62 "test1.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 63 "test1.pgc"
/* wrong port */
- { ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:20/connectdb" , "connectuser" , "connectpw" , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:20/connectdb", "connectuser", "connectpw", NULL, 0);
+ }
#line 66 "test1.pgc"
/* no disconnect necessary */
/* wrong password */
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb" , "connectuser" , "wrongpw" , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost:55432/connectdb", "connectuser", "wrongpw", NULL, 0);
+ }
#line 70 "test1.pgc"
/* no disconnect necessary */
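
For reference, the reformatted ECPGconnect() calls above take (line number, compat mode, connection target, user, password, connection name, autocommit). The embedded statements they came from are reconstructed here as a guess from those arguments; the EXEC SQL side is not part of this diff.

/* Reconstructed mapping, illustrative only:
 *
 *   EXEC SQL CONNECT TO connectdb@localhost:55432 AS main;
 *     -> ECPGconnect(__LINE__, 0, "connectdb@localhost:55432", NULL, NULL, "main", 0);
 *
 *   EXEC SQL CONNECT TO tcp:postgresql://localhost:55432/connectdb
 *            USER connectuser USING "connectpw";
 *     -> ECPGconnect(__LINE__, 0, "tcp:postgresql://localhost:55432/connectdb",
 *                    "connectuser", "connectpw", NULL, 0);
 */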
main(void)
{
/* exec sql begin declare section */
-
-
+
+
#line 17 "test2.pgc"
- char id [ 200 ] ;
-
+ char id[200];
+
#line 18 "test2.pgc"
- char res [ 200 ] ;
+ char res[200];
+
/* exec sql end declare section */
#line 19 "test2.pgc"
ECPGdebug(1, stderr);
strcpy(id, "first");
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , id, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, id, 0);
+ }
#line 24 "test2.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , "second", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, "second", 0);
+ }
#line 25 "test2.pgc"
/* this selects from "second" which was opened last */
- { ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 28 "test2.pgc"
- { ECPGdo(__LINE__, 0, 1, "first", "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, "first", "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 29 "test2.pgc"
- { ECPGdo(__LINE__, 0, 1, "second", "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, "second", "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 30 "test2.pgc"
- { ECPGsetconn(__LINE__, "first");}
+ {
+ ECPGsetconn(__LINE__, "first");
+ }
#line 32 "test2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 33 "test2.pgc"
/* this will disconnect from "first" */
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 36 "test2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 37 "test2.pgc"
/* error here since "first" is already disconnected */
- { ECPGdisconnect(__LINE__, id);}
+ {
+ ECPGdisconnect(__LINE__, id);
+ }
#line 40 "test2.pgc"
/* disconnect from "second" */
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 43 "test2.pgc"
main(void)
{
/* exec sql begin declare section */
-
-
+
+
#line 16 "test3.pgc"
- char id [ 200 ] ;
-
+ char id[200];
+
#line 17 "test3.pgc"
- char res [ 200 ] ;
+ char res[200];
+
/* exec sql end declare section */
#line 18 "test3.pgc"
ECPGdebug(1, stderr);
strcpy(id, "first");
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , id, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, id, 0);
+ }
#line 23 "test3.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , "second", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, "second", 0);
+ }
#line 24 "test3.pgc"
/* this selects from "second" which was opened last */
- { ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 27 "test3.pgc"
/* will close "second" */
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 30 "test3.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
- ECPGt_char,(res),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select current_database () ", ECPGt_EOIT,
+ ECPGt_char, (res), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 31 "test3.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , "second", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, "second", 0);
+ }
#line 33 "test3.pgc"
/* will close "second" */
- { ECPGdisconnect(__LINE__, "DEFAULT");}
+ {
+ ECPGdisconnect(__LINE__, "DEFAULT");
+ }
#line 35 "test3.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "second", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "second", 0);
+ }
#line 37 "test3.pgc"
- { ECPGdisconnect(__LINE__, "ALL");}
+ {
+ ECPGdisconnect(__LINE__, "ALL");
+ }
#line 38 "test3.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 40 "test3.pgc"
- { ECPGdisconnect(__LINE__, "DEFAULT");}
+ {
+ ECPGdisconnect(__LINE__, "DEFAULT");
+ }
#line 41 "test3.pgc"
- { ECPGdisconnect(__LINE__, "ALL");}
+ {
+ ECPGdisconnect(__LINE__, "ALL");
+ }
#line 42 "test3.pgc"
/*
- * exec sql disconnect;
- * exec sql disconnect name;
+ * exec sql disconnect; exec sql disconnect name;
*
- * are used in other tests
+ * are used in other tests
*/
return (0);
{
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, "main", 0);
+ }
#line 13 "test4.pgc"
- { ECPGsetconn(__LINE__, "main");}
+ {
+ ECPGsetconn(__LINE__, "main");
+ }
#line 15 "test4.pgc"
- { ECPGdisconnect(__LINE__, "DEFAULT");}
+ {
+ ECPGdisconnect(__LINE__, "DEFAULT");
+ }
#line 17 "test4.pgc"
main(void)
{
/* exec sql begin declare section */
-
-
+
+
#line 16 "test5.pgc"
- char db [ 200 ] ;
-
+ char db[200];
+
#line 17 "test5.pgc"
- char id [ 200 ] ;
+ char id[200];
+
/* exec sql end declare section */
#line 18 "test5.pgc"
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 22 "test5.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "alter user connectuser encrypted password 'connectpw'", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "alter user connectuser encrypted password 'connectpw'", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 23 "test5.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 24 "test5.pgc"
- /* <-- "main" not specified */
+ /* <-- "main" not specified */
strcpy(db, "connectdb");
strcpy(id, "main");
- { ECPGconnect(__LINE__, 0, db , NULL,NULL , id, 0); }
+ {
+ ECPGconnect(__LINE__, 0, db, NULL, NULL, id, 0);
+ }
#line 28 "test5.pgc"
- { ECPGdisconnect(__LINE__, id);}
+ {
+ ECPGdisconnect(__LINE__, id);
+ }
#line 29 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 31 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 32 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 34 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 35 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 37 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 38 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "" , "connectdb" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "", "connectdb", NULL, "main", 0);
+ }
#line 40 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 41 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , "connectuser" , "connectdb" , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", "connectuser", "connectdb", "main", 0);
+ }
#line 43 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 44 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb" , "connectuser" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb", "connectuser", NULL, "main", 0);
+ }
#line 46 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 47 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb" , "connectuser" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb", "connectuser", NULL, "main", 0);
+ }
#line 49 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 50 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb" , "connectuser" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/connectdb", "connectuser", NULL, "main", 0);
+ }
#line 52 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 53 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://200.46.204.71/connectdb" , "connectuser" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://200.46.204.71/connectdb", "connectuser", NULL, "main", 0);
+ }
#line 55 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 56 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/" , "connectdb" , NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "unix:postgresql://localhost/", "connectdb", NULL, "main", 0);
+ }
#line 58 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 59 "test5.pgc"
/* connect twice */
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 62 "test5.pgc"
- { ECPGconnect(__LINE__, 0, "connectdb" , NULL,NULL , "main", 0); }
+ {
+ ECPGconnect(__LINE__, 0, "connectdb", NULL, NULL, "main", 0);
+ }
#line 63 "test5.pgc"
- { ECPGdisconnect(__LINE__, "main");}
+ {
+ ECPGdisconnect(__LINE__, "main");
+ }
#line 64 "test5.pgc"
/* not connected */
- { ECPGdisconnect(__LINE__, "nonexistant");}
+ {
+ ECPGdisconnect(__LINE__, "nonexistant");
+ }
#line 67 "test5.pgc"
main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
+
+
+
+
+
#line 14 "dt_test.pgc"
- date date1 ;
-
+ date date1;
+
#line 15 "dt_test.pgc"
- timestamp ts1 ;
-
+ timestamp ts1;
+
#line 16 "dt_test.pgc"
- interval * iv1 , iv2 ;
-
+ interval *iv1,
+ iv2;
+
#line 17 "dt_test.pgc"
- char * text ;
+ char *text;
+
/* exec sql end declare section */
#line 18 "dt_test.pgc"
- date date2;
- int mdy[3] = { 4, 19, 1998 };
- char *fmt, *out, *in;
- char *d1 = "Mon Jan 17 1966";
- char *t1 = "2000-7-12 17:34:29";
- int i;
-
- ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do sqlprint ( ) ; */
+ date date2;
+ int mdy[3] = {4, 19, 1998};
+ char *fmt,
+ *out,
+ *in;
+ char *d1 = "Mon Jan 17 1966";
+ char *t1 = "2000-7-12 17:34:29";
+ int i;
+
+ ECPGdebug(1, stderr);
+ /* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 27 "dt_test.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 28 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 28 "dt_test.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table date_test ( d date , ts timestamp ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table date_test ( d date , ts timestamp ) ", ECPGt_EOIT, ECPGt_EORT);
#line 29 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "dt_test.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to iso", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to iso", ECPGt_EOIT, ECPGt_EORT);
#line 30 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 30 "dt_test.pgc"
- date1 = PGTYPESdate_from_asc(d1, NULL);
- ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
+ date1 = PGTYPESdate_from_asc(d1, NULL);
+ ts1 = PGTYPEStimestamp_from_asc(t1, NULL);
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into date_test ( d , ts ) values( ? , ? ) ",
- ECPGt_date,&(date1),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(ts1),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into date_test ( d , ts ) values( ? , ? ) ",
+ ECPGt_date, &(date1), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(ts1), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 35 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "dt_test.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from date_test where d = ? ",
- ECPGt_date,&(date1),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_date,&(date1),(long)1,(long)1,sizeof(date),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_timestamp,&(ts1),(long)1,(long)1,sizeof(timestamp),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from date_test where d = ? ",
+ ECPGt_date, &(date1), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_date, &(date1), (long) 1, (long) 1, sizeof(date),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_timestamp, &(ts1), (long) 1, (long) 1, sizeof(timestamp),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 37 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "dt_test.pgc"
text = PGTYPESdate_to_asc(date1);
- printf ("Date: %s\n", text);
+
+ printf("Date: %s\n", text);
free(text);
text = PGTYPEStimestamp_to_asc(ts1);
- printf ("timestamp: %s\n", text);
+
+ printf("timestamp: %s\n", text);
free(text);
iv1 = PGTYPESinterval_from_asc("13556 days 12 hours 34 minutes 14 seconds ", NULL);
PGTYPESinterval_copy(iv1, &iv2);
text = PGTYPESinterval_to_asc(&iv2);
- printf ("interval: %s\n", text);
+
+ printf("interval: %s\n", text);
free(text);
PGTYPESdate_mdyjul(mdy, &date2);
ts1 = PGTYPEStimestamp_from_asc("2003-12-04 17:34:29", NULL);
text = PGTYPEStimestamp_to_asc(ts1);
+
fmt = "(ddd), mmm. dd, yyyy, repeat: (ddd), mmm. dd, yyyy. end";
- out = (char*) malloc(strlen(fmt) + 1);
+ out = (char *) malloc(strlen(fmt) + 1);
date1 = PGTYPESdate_from_timestamp(ts1);
PGTYPESdate_fmt_asc(date1, fmt, out);
printf("date_day of %s is %d\n", text, PGTYPESdate_dayofweek(date1));
/* rdate_defmt_asc() */
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "yy/mm/dd";
in = "In the year 1995, the month of December, it is the 25th day";
- /* 0123456789012345678901234567890123456789012345678901234567890
- * 0 1 2 3 4 5 6
+
+ /*
+ * 0123456789012345678901234567890123456789012345678901234567890 0
+ * 1 2 3 4 5 6
*/
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc1: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mmmm. dd. yyyy";
in = "12/25/95";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc2: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "yy/mm/dd";
in = "95/12/25";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc3: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "yy/mm/dd";
in = "1995, December 25th";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc4: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "dd-mm-yy";
in = "This is 25th day of December, 1995";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc5: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mmddyy";
in = "Dec. 25th, 1995";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc6: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mmm. dd. yyyy";
in = "dec 25th 1995";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc7: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mmm. dd. yyyy";
in = "DEC-25-1995";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc8: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mm yy dd.";
in = "12199525";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc9: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "yyyy fierj mm dd.";
in = "19951225";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc10: %s\n", text);
free(text);
- date1 = 0; text = "";
+ date1 = 0;
+ text = "";
+
fmt = "mm/dd/yy";
in = "122595";
PGTYPESdate_defmt_asc(&date1, fmt, in);
text = PGTYPESdate_to_asc(date1);
+
printf("date_defmt_asc12: %s\n", text);
free(text);
PGTYPEStimestamp_current(&ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
/* can't output this in regression mode */
/* printf("timestamp_current: Now: %s\n", text); */
free(text);
ts1 = PGTYPEStimestamp_from_asc("96-02-29", NULL);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_to_asc1: %s\n", text);
free(text);
ts1 = PGTYPEStimestamp_from_asc("1994-02-11 3:10:35", NULL);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_to_asc2: %s\n", text);
free(text);
ts1 = PGTYPEStimestamp_from_asc("1994-02-11 26:10:35", NULL);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_to_asc3: %s\n", text);
free(text);
/* abc-03:10:35-def-02/11/94-gh */
-/* 12345678901234567890123456789 */
+/* 12345678901234567890123456789 */
- out = (char*) malloc(32);
+ out = (char *) malloc(32);
i = PGTYPEStimestamp_fmt_asc(&ts1, out, 31, "abc-%X-def-%x-ghi%%");
printf("timestamp_fmt_asc: %d: %s\n", i, out);
free(out);
fmt = "This is a %m/%d/%y %H-%Ml%Stest";
- in = "This is a 4/12/80 3-39l12test";
+ in = "This is a 4/12/80 3-39l12test";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %d %H:%M:%S %z %Y";
- in = "Tue Jul 22 17:28:44 +0200 2003";
+ in = "Tue Jul 22 17:28:44 +0200 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %d %H:%M:%S %z %Y";
- in = "Tue Feb 29 17:28:44 +0200 2000";
+ in = "Tue Feb 29 17:28:44 +0200 2000";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %d %H:%M:%S %z %Y";
- in = "Tue Feb 29 17:28:44 +0200 1900";
+ in = "Tue Feb 29 17:28:44 +0200 1900";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %d %H:%M:%S %z %Y";
- in = "Tue Feb 29 17:28:44 +0200 1996";
+ in = "Tue Feb 29 17:28:44 +0200 1996";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%b %d %H:%M:%S %z %Y";
- in = " Jul 31 17:28:44 +0200 1996";
+ in = " Jul 31 17:28:44 +0200 1996";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%b %d %H:%M:%S %z %Y";
- in = " Jul 32 17:28:44 +0200 1996";
+ in = " Jul 32 17:28:44 +0200 1996";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %d %H:%M:%S %z %Y";
- in = "Tue Feb 29 17:28:44 +0200 1997";
+ in = "Tue Feb 29 17:28:44 +0200 1997";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "%";
- in = "Tue Jul 22 17:28:44 +0200 2003";
+ in = "Tue Jul 22 17:28:44 +0200 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "a %";
- in = "Tue Jul 22 17:28:44 +0200 2003";
+ in = "Tue Jul 22 17:28:44 +0200 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "%b, %d %H_%M`%S %z %Y";
- in = " Jul, 22 17_28 `44 +0200 2003 ";
+ in = " Jul, 22 17_28 `44 +0200 2003 ";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %%%d %H:%M:%S %Z %Y";
- in = "Tue Jul %22 17:28:44 CEST 2003";
+ in = "Tue Jul %22 17:28:44 CEST 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%a %b %%%d %H:%M:%S %Z %Y";
- in = "Tue Jul %22 17:28:44 CEST 2003";
+ in = "Tue Jul %22 17:28:44 CEST 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "abc%n %C %B %%%d %H:%M:%S %Z %Y";
- in = "abc\n 19 October %22 17:28:44 CEST 2003";
+ in = "abc\n 19 October %22 17:28:44 CEST 2003";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "abc%n %C %B %%%d %H:%M:%S %Z %y";
- in = "abc\n 18 October %34 17:28:44 CEST 80";
+ in = "abc\n 18 October %34 17:28:44 CEST 80";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = "";
- in = "abc\n 18 October %34 17:28:44 CEST 80";
+ in = "abc\n 18 October %34 17:28:44 CEST 80";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error (should be error!): %d\n", in, fmt, text, i);
free(text);
fmt = NULL;
- in = "1980-04-12 3:49:44 ";
+ in = "1980-04-12 3:49:44 ";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, NULL) = %s, error: %d\n", in, text, i);
free(text);
fmt = "%B %d, %Y. Time: %I:%M%p";
- in = "July 14, 1988. Time: 9:15am";
+ in = "July 14, 1988. Time: 9:15am";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%B %d at %I:%M %p in the year %Y";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%Y, %B %d. Time: %I:%M %p";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%Y, %B %d. Time: %I:%M%p";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
fmt = "%Y, %P %B %d. Time: %I:%M";
i = PGTYPEStimestamp_defmt_asc(in, fmt, &ts1);
text = PGTYPEStimestamp_to_asc(ts1);
+
printf("timestamp_defmt_asc(%s, %s) = %s, error: %d\n", in, fmt, text, i);
free(text);
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 350 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 350 "dt_test.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 351 "dt_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 351 "dt_test.pgc"
return (0);
}
-
#line 8 "dt_test2.pgc"
-char *dates[] = { "19990108foobar",
- "19990108 foobar",
- "1999-01-08 foobar",
- "January 8, 1999",
- "1999-01-08",
- "1/8/1999",
- "1/18/1999",
- "01/02/03",
- "1999-Jan-08",
- "Jan-08-1999",
- "08-Jan-1999",
- "99-Jan-08",
- "08-Jan-99",
- "08-Jan-06",
- "Jan-08-99",
- "19990108",
- "990108",
- "1999.008",
- "J2451187",
- "January 8, 99 BC",
- NULL };
-
-char *times[] = { "0:04",
- "1:59 PDT",
- "13:24:40 -8:00",
- "13:24:40.495+3",
- NULL };
-
-char *intervals[] = { "1 minute",
- "1 12:59:10",
- "2 day 12 hour 59 minute 10 second",
- "1 days 12 hrs 59 mins 10 secs",
- "1 days 1 hours 1 minutes 1 seconds",
- "1 year 59 mins",
- "1 year 59 mins foobar",
- NULL };
+char *dates[] = {"19990108foobar",
+ "19990108 foobar",
+ "1999-01-08 foobar",
+ "January 8, 1999",
+ "1999-01-08",
+ "1/8/1999",
+ "1/18/1999",
+ "01/02/03",
+ "1999-Jan-08",
+ "Jan-08-1999",
+ "08-Jan-1999",
+ "99-Jan-08",
+ "08-Jan-99",
+ "08-Jan-06",
+ "Jan-08-99",
+ "19990108",
+ "990108",
+ "1999.008",
+ "J2451187",
+ "January 8, 99 BC",
+NULL};
+
+char *times[] = {"0:04",
+ "1:59 PDT",
+ "13:24:40 -8:00",
+ "13:24:40.495+3",
+NULL};
+
+char *intervals[] = {"1 minute",
+ "1 12:59:10",
+ "2 day 12 hour 59 minute 10 second",
+ "1 days 12 hrs 59 mins 10 secs",
+ "1 days 1 hours 1 minutes 1 seconds",
+ "1 year 59 mins",
+ "1 year 59 mins foobar",
+NULL};
int
main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
-
+
+
+
+
+
+
#line 51 "dt_test2.pgc"
- date date1 ;
-
+ date date1;
+
#line 52 "dt_test2.pgc"
- timestamp ts1 , ts2 ;
-
+ timestamp ts1,
+ ts2;
+
#line 53 "dt_test2.pgc"
- char * text ;
-
+ char *text;
+
#line 54 "dt_test2.pgc"
- interval * i1 ;
-
+ interval *i1;
+
#line 55 "dt_test2.pgc"
- date * dc ;
+ date *dc;
+
/* exec sql end declare section */
#line 56 "dt_test2.pgc"
- int i, j;
- char *endptr;
+ int i,
+ j;
+ char *endptr;
ECPGdebug(1, stderr);
dc = PGTYPESdate_new();
*dc = date1;
text = PGTYPESdate_to_asc(*dc);
+
printf("Date of timestamp: %s\n", text);
free(text);
PGTYPESdate_free(dc);
for (i = 0; dates[i]; i++)
{
- bool err = false;
+ bool err = false;
+
date1 = PGTYPESdate_from_asc(dates[i], &endptr);
- if (date1 == INT_MIN) {
+ if (date1 == INT_MIN)
+ {
err = true;
}
text = PGTYPESdate_to_asc(date1);
+
printf("Date[%d]: %s (%c - %c)\n",
- i, err ? "-" : text,
- endptr ? 'N' : 'Y',
- err ? 'T' : 'F');
+ i, err ? "-" : text,
+ endptr ? 'N' : 'Y',
+ err ? 'T' : 'F');
free(text);
if (!err)
{
for (j = 0; times[j]; j++)
{
- int length = strlen(dates[i])
- + 1
- + strlen(times[j])
- + 1;
- char* t = malloc(length);
+ int length = strlen(dates[i])
+ + 1
+ + strlen(times[j])
+ + 1;
+ char *t = malloc(length);
+
sprintf(t, "%s %s", dates[i], times[j]);
ts1 = PGTYPEStimestamp_from_asc(t, NULL);
text = PGTYPEStimestamp_to_asc(ts1);
- if (i != 19 || j != 3) /* timestamp as integer or double differ for this case */
+
+ if (i != 19 || j != 3) /* timestamp as integer or double
+ * differ for this case */
printf("TS[%d,%d]: %s\n",
- i, j, errno ? "-" : text);
+ i, j, errno ? "-" : text);
free(text);
}
}
for (i = 0; intervals[i]; i++)
{
- interval *ic;
+ interval *ic;
+
i1 = PGTYPESinterval_from_asc(intervals[i], &endptr);
if (*endptr)
printf("endptr set to %s\n", endptr);
if (j < 0)
continue;
text = PGTYPESinterval_to_asc(i1);
+
printf("interval[%d]: %s\n", i, text ? text : "-");
free(text);
ic = PGTYPESinterval_new();
PGTYPESinterval_copy(i1, ic);
text = PGTYPESinterval_to_asc(i1);
+
printf("interval_copy[%d]: %s\n", i, text ? text : "-");
free(text);
PGTYPESinterval_free(ic);
return (0);
}
-
int
main(void)
{
- char *text="error\n";
- numeric *value1, *value2, *res;
+ char *text = "error\n";
+ numeric *value1,
+ *value2,
+ *res;
+
/* exec sql begin declare section */
-
- /* = {0, 0, 0, 0, 0, NULL, NULL} ; */
-
+
+ /* = {0, 0, 0, 0, 0, NULL, NULL} ; */
+
#line 22 "num_test.pgc"
- numeric * des ;
+ numeric *des;
+
/* exec sql end declare section */
#line 24 "num_test.pgc"
- double d;
- long l1, l2;
- int i;
+ double d;
+ long l1,
+ l2;
+ int i;
ECPGdebug(1, stderr);
/* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 30 "num_test.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 32 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "num_test.pgc"
- { ECPGsetcommit(__LINE__, "off", NULL);
+ {
+ ECPGsetcommit(__LINE__, "off", NULL);
#line 34 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "num_test.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( text char ( 5 ) , num numeric ( 14 , 7 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( text char ( 5 ) , num numeric ( 14 , 7 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 35 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "num_test.pgc"
value1 = PGTYPESnumeric_new();
PGTYPESnumeric_from_int(1407, value1);
text = PGTYPESnumeric_to_asc(value1, -1);
+
printf("from int = %s\n", text);
free(text);
PGTYPESnumeric_free(value1);
res = PGTYPESnumeric_new();
PGTYPESnumeric_add(value1, value2, res);
text = PGTYPESnumeric_to_asc(res, -1);
+
printf("add = %s\n", text);
free(text);
PGTYPESnumeric_sub(res, value2, res);
text = PGTYPESnumeric_to_asc(res, -1);
+
printf("sub = %s\n", text);
free(text);
PGTYPESnumeric_free(value2);
des = PGTYPESnumeric_new();
PGTYPESnumeric_copy(res, des);
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( text , num ) values( 'test' , ? ) ",
- ECPGt_numeric,&(des),(long)1,(long)0,sizeof(numeric),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( text , num ) values( 'test' , ? ) ",
+ ECPGt_numeric, &(des), (long) 1, (long) 0, sizeof(numeric),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 60 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 60 "num_test.pgc"
PGTYPESnumeric_mul(value1, value2, res);
PGTYPESnumeric_free(value2);
- { ECPGdo(__LINE__, 0, 1, NULL, "select num from test where text = 'test' ", ECPGt_EOIT,
- ECPGt_numeric,&(des),(long)1,(long)0,sizeof(numeric),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select num from test where text = 'test' ", ECPGt_EOIT,
+ ECPGt_numeric, &(des), (long) 1, (long) 0, sizeof(numeric),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 66 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 66 "num_test.pgc"
PGTYPESnumeric_mul(res, des, res);
text = PGTYPESnumeric_to_asc(res, -1);
+
printf("mul = %s\n", text);
free(text);
PGTYPESnumeric_free(des);
value2 = PGTYPESnumeric_from_asc("10000", NULL);
PGTYPESnumeric_div(res, value2, res);
text = PGTYPESnumeric_to_asc(res, -1);
+
PGTYPESnumeric_to_double(res, &d);
printf("div = %s %e\n", text, d);
PGTYPESnumeric_free(value2);
PGTYPESnumeric_free(res);
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 90 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 90 "num_test.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 91 "num_test.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 91 "num_test.pgc"
return (0);
}
-
*/
-char* nums[] = { "2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4",
- "2E-394", ".1E-2", "+.0", "-592.49E-07", "+32.84e-4",
- ".500001", "-.5000001",
- "1234567890123456789012345678.91", /* 30 digits should fit
- into decimal */
- "1234567890123456789012345678.921", /* 31 digits should NOT
- fit into decimal */
- "not a number",
- NULL};
+char *nums[] = {"2E394", "-2", ".794", "3.44", "592.49E21", "-32.84e4",
+ "2E-394", ".1E-2", "+.0", "-592.49E-07", "+32.84e-4",
+ ".500001", "-.5000001",
+ "1234567890123456789012345678.91", /* 30 digits should fit into decimal */
+ "1234567890123456789012345678.921", /* 31 digits should NOT fit into
+ * decimal */
+ "not a number",
+NULL};
static void
-check_errno(void);
+ check_errno(void);
int
main(void)
{
- char *text="error\n";
- char *endptr;
- numeric *num, *nin;
- decimal *dec;
- long l;
- int i, j, k, q, r, count = 0;
- double d;
- numeric **numarr = (numeric **) calloc(1, sizeof(numeric));
+ char *text = "error\n";
+ char *endptr;
+ numeric *num,
+ *nin;
+ decimal *dec;
+ long l;
+ int i,
+ j,
+ k,
+ q,
+ r,
+ count = 0;
+ double d;
+ numeric **numarr = (numeric **) calloc(1, sizeof(numeric));
ECPGdebug(1, stderr);
for (i = 0; nums[i]; i++)
{
num = PGTYPESnumeric_from_asc(nums[i], &endptr);
- if (!num) check_errno();
+ if (!num)
+ check_errno();
if (endptr != NULL)
{
printf("endptr of %d is not NULL\n", i);
if (*endptr != '\0')
printf("*endptr of %d is not \\0\n", i);
}
- if (!num) continue;
+ if (!num)
+ continue;
numarr = realloc(numarr, sizeof(numeric *) * (count + 1));
numarr[count++] = num;
text = PGTYPESnumeric_to_asc(num, -1);
- if (!text) check_errno();
- printf("num[%d,1]: %s\n", i, text); free(text);
+
+ if (!text)
+ check_errno();
+ printf("num[%d,1]: %s\n", i, text);
+ free(text);
text = PGTYPESnumeric_to_asc(num, 0);
- if (!text) check_errno();
- printf("num[%d,2]: %s\n", i, text); free(text);
+
+ if (!text)
+ check_errno();
+ printf("num[%d,2]: %s\n", i, text);
+ free(text);
text = PGTYPESnumeric_to_asc(num, 1);
- if (!text) check_errno();
- printf("num[%d,3]: %s\n", i, text); free(text);
+
+ if (!text)
+ check_errno();
+ printf("num[%d,3]: %s\n", i, text);
+ free(text);
text = PGTYPESnumeric_to_asc(num, 2);
- if (!text) check_errno();
- printf("num[%d,4]: %s\n", i, text); free(text);
+
+ if (!text)
+ check_errno();
+ printf("num[%d,4]: %s\n", i, text);
+ free(text);
nin = PGTYPESnumeric_new();
text = PGTYPESnumeric_to_asc(nin, 2);
- if (!text) check_errno();
- printf("num[%d,5]: %s\n", i, text); free(text);
+
+ if (!text)
+ check_errno();
+ printf("num[%d,5]: %s\n", i, text);
+ free(text);
r = PGTYPESnumeric_to_long(num, &l);
- if (r) check_errno();
- printf("num[%d,6]: %ld (r: %d)\n", i, r?0L:l, r);
+ if (r)
+ check_errno();
+ printf("num[%d,6]: %ld (r: %d)\n", i, r ? 0L : l, r);
if (r == 0)
{
r = PGTYPESnumeric_from_long(l, nin);
- if (r) check_errno();
+ if (r)
+ check_errno();
text = PGTYPESnumeric_to_asc(nin, 2);
+
q = PGTYPESnumeric_cmp(num, nin);
printf("num[%d,7]: %s (r: %d - cmp: %d)\n", i, text, r, q);
free(text);
}
r = PGTYPESnumeric_to_int(num, &k);
- if (r) check_errno();
- printf("num[%d,8]: %d (r: %d)\n", i, r?0:k, r);
+ if (r)
+ check_errno();
+ printf("num[%d,8]: %d (r: %d)\n", i, r ? 0 : k, r);
if (r == 0)
{
r = PGTYPESnumeric_from_int(k, nin);
- if (r) check_errno();
+ if (r)
+ check_errno();
text = PGTYPESnumeric_to_asc(nin, 2);
+
q = PGTYPESnumeric_cmp(num, nin);
printf("num[%d,9]: %s (r: %d - cmp: %d)\n", i, text, r, q);
free(text);
}
r = PGTYPESnumeric_to_double(num, &d);
- if (r) check_errno();
- printf("num[%d,10]: %g (r: %d)\n", i, r?0.0:d, r);
- /* do not test double to numeric because
- * - extra digits are different on different architectures
- * - PGTYPESnumeric_from_double internally calls PGTYPESnumeric_from_asc anyway
+ if (r)
+ check_errno();
+ printf("num[%d,10]: %g (r: %d)\n", i, r ? 0.0 : d, r);
+
+ /*
+ * do not test double to numeric because - extra digits are different
+ * on different architectures - PGTYPESnumeric_from_double internally
+ * calls PGTYPESnumeric_from_asc anyway
*/
dec = PGTYPESdecimal_new();
r = PGTYPESnumeric_to_decimal(num, dec);
- if (r) check_errno();
- /* we have no special routine for outputting decimal, it would
- * convert to a numeric anyway */
+ if (r)
+ check_errno();
+
+ /*
+ * we have no special routine for outputting decimal, it would convert
+ * to a numeric anyway
+ */
printf("num[%d,11]: - (r: %d)\n", i, r);
if (r == 0)
{
r = PGTYPESnumeric_from_decimal(dec, nin);
- if (r) check_errno();
+ if (r)
+ check_errno();
text = PGTYPESnumeric_to_asc(nin, 2);
+
q = PGTYPESnumeric_cmp(num, nin);
printf("num[%d,12]: %s (r: %d - cmp: %d)\n", i, text, r, q);
free(text);
{
for (j = 0; j < count; j++)
{
- numeric* a = PGTYPESnumeric_new();
- numeric* s = PGTYPESnumeric_new();
- numeric* m = PGTYPESnumeric_new();
- numeric* d = PGTYPESnumeric_new();
+ numeric *a = PGTYPESnumeric_new();
+ numeric *s = PGTYPESnumeric_new();
+ numeric *m = PGTYPESnumeric_new();
+ numeric *d = PGTYPESnumeric_new();
+
r = PGTYPESnumeric_add(numarr[i], numarr[j], a);
if (r)
{
else
{
text = PGTYPESnumeric_to_asc(a, 10);
+
printf("num[a,%d,%d]: %s\n", i, j, text);
free(text);
}
else
{
text = PGTYPESnumeric_to_asc(s, 10);
+
printf("num[s,%d,%d]: %s\n", i, j, text);
free(text);
}
else
{
text = PGTYPESnumeric_to_asc(m, 10);
+
printf("num[m,%d,%d]: %s\n", i, j, text);
free(text);
}
else
{
text = PGTYPESnumeric_to_asc(d, 10);
+
printf("num[d,%d,%d]: %s\n", i, j, text);
free(text);
}
for (i = 0; i < count; i++)
{
text = PGTYPESnumeric_to_asc(numarr[i], -1);
+
printf("%d: %s\n", i, text);
free(text);
}
static void
check_errno(void)
{
- switch(errno)
+ switch (errno)
{
case 0:
printf("(no errno set) - ");
#line 3 "comment.pgc"
-/* just a test comment */ int i;
-/* just a test comment int j*/;
+ /* just a test comment */ int i;
+
+ /* just a test comment int j*/ ;
/****************************************************************************/
-/* Test comment */
+/* Test comment */
/*--------------------------------------------------------------------------*/
-int main(void)
+int
+main(void)
{
- ECPGdebug(1, stderr);
+ ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 17 "comment.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 19 "comment.pgc"
- exit (0);
+ exit(0);
}
-
-/* exec sql type intarray is int [ 6 ] */
+/* exec sql type intarray is int [ 6 ] */
#line 13 "define.pgc"
-typedef int intarray[ 6];
+typedef int intarray[6];
int
main(void)
{
/* exec sql begin declare section */
- typedef char string [ 8 ] ;
+ typedef char string[8];
#line 21 "define.pgc"
-
-
-
-
+
+
+
+
#line 22 "define.pgc"
- intarray amount ;
-
+ intarray amount;
+
#line 23 "define.pgc"
- char name [ 6 ] [ 8 ] ;
-
+ char name[6][8];
+
#line 24 "define.pgc"
- char letter [ 6 ] [ 1 ] ;
-
+ char letter[6][1];
+
#if 0
-
+
#line 26 "define.pgc"
- int not_used ;
-
+ int not_used;
#endif
/* exec sql end declare section */
#line 29 "define.pgc"
- int i,j;
+ int i,
+ j;
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 34 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( name char ( 8 ) , amount int , letter char ( 1 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( name char ( 8 ) , amount int , letter char ( 1 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 36 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "define.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 37 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into Test ( name , amount , letter ) values ( 'false' , 1 , 'f' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into Test ( name , amount , letter ) values ( 'false' , 1 , 'f' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 39 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( name , amount , letter ) values ( 'true' , 2 , 't' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( name , amount , letter ) values ( 'true' , 2 , 't' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 40 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 40 "define.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 41 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 41 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
- ECPGt_char,(name),(long)8,(long)6,(8)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,(amount),(long)1,(long)6,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(letter),(long)1,(long)6,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
+ ECPGt_char, (name), (long) 8, (long) 6, (8) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, (amount), (long) 1, (long) 6, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (letter), (long) 1, (long) 6, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 43 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 43 "define.pgc"
- for (i=0, j=sqlca.sqlerrd[2]; i<j; i++)
+ for (i = 0, j = sqlca.sqlerrd[2]; i < j; i++)
{
/* exec sql begin declare section */
-
-
-
+
+
+
#line 48 "define.pgc"
- char n [ 8 ] , l = letter [ i ] [ 0 ] ;
-
+ char n[8],
+ l = letter[i][0];
+
#line 49 "define.pgc"
- int a = amount [ i ] ;
+ int a = amount[i];
+
/* exec sql end declare section */
#line 50 "define.pgc"
- strncpy(n, name[i], 8);
+ strncpy(n, name[i], 8);
printf("name[%d]=%8.8s\tamount[%d]=%d\tletter[%d]=%c\n", i, n, i, a, i, l);
}
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
#line 56 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 56 "define.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 57 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 57 "define.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 58 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 58 "define.pgc"
#ifdef __cplusplus
}
#endif
-
#endif
#line 1 "init.pgc"
-enum e { ENUM0, ENUM1 };
-struct sa { int member; };
+enum e
+{
+ ENUM0, ENUM1
+};
+struct sa
+{
+ int member;
+};
-static int fa(void)
+static int
+fa(void)
{
printf("in fa\n");
return 2;
return *x;
}
-static int fd(const char *x,int i)
+static int
+fd(const char *x, int i)
{
printf("in fd (%s, %d)\n", x, i);
- return (*x)*i;
+ return (*x) * i;
}
-static int fe(enum e x)
+static int
+fe(enum e x)
{
printf("in fe (%d)\n", (int) x);
- return (int)x;
+ return (int) x;
}
-static void sqlnotice(char *notice, short trans)
+static void
+sqlnotice(char *notice, short trans)
{
if (!notice)
notice = "-empty-";
#define YES 1
#ifdef _cplusplus
-namespace N
+namespace N
{
- static const int i=2;
+ static const int i = 2;
};
#endif
-int main(void)
+int
+main(void)
{
- struct sa x = { 14 },*y = &x;
+ struct sa x = {14}, *y = &x;
+
/* exec sql begin declare section */
-
-
-
-
-
-
-
-
-
-
-
- /* = 1L */
-
+
+
+
+
+
+
+
+
+
+
+
+ /* = 1L */
+
#line 60 "init.pgc"
- int a = ( int ) 2 ;
-
+ int a = (int) 2;
+
#line 61 "init.pgc"
- int b = 2 + 2 ;
-
+ int b = 2 + 2;
+
#line 62 "init.pgc"
- int b2 = ( 14 * 7 ) ;
-
+ int b2 = (14 * 7);
+
#line 63 "init.pgc"
- int d = x . member ;
-
+ int d = x.member;
+
#line 64 "init.pgc"
- int g = fb ( 2 ) ;
-
+ int g = fb(2);
+
#line 65 "init.pgc"
- int i = 3 ^ 1 ;
-
+ int i = 3 ^ 1;
+
#line 66 "init.pgc"
- int j = 1 ? 1 : 2 ;
-
+ int j = 1 ? 1 : 2;
+
#line 68 "init.pgc"
- int e = y -> member ;
-
+ int e = y->member;
+
#line 69 "init.pgc"
- int c = 10 >> 2 ;
-
+ int c = 10 >> 2;
+
#line 70 "init.pgc"
- bool h = 2 || 1 ;
-
+ bool h = 2 || 1;
+
#line 71 "init.pgc"
- long iay ;
+ long iay;
+
/* exec sql end declare section */
#line 72 "init.pgc"
- int f=fa();
+ int f = fa();
#ifdef _cplusplus
/* exec sql begin declare section */
- /* compile error */
-
+ /* compile error */
+
#line 78 "init.pgc"
- int k = N : : i ;
+ int k = N::i;
+
/* exec sql end declare section */
#line 79 "init.pgc"
-
#endif
ECPGdebug(1, stderr);
printf("%d %d %d %d %d %d %d %d %d %d %d\n", a, b, b2, c, d, e, f, g, h, i, j);
iay = 0;
printf("%ld\n", iay);
- /* exec sql whenever sqlerror do fa ( ) ; */
+ /* exec sql whenever sqlerror do fa ( ) ; */
#line 87 "init.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 88 "init.pgc"
-if (sqlca.sqlcode < 0) fa ( );}
+ if (sqlca.sqlcode < 0)
+ fa();
+ }
#line 88 "init.pgc"
/* exec sql whenever sqlerror do fb ( 20 ) ; */
#line 89 "init.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 90 "init.pgc"
-if (sqlca.sqlcode < 0) fb ( 20 );}
+ if (sqlca.sqlcode < 0)
+ fb(20);
+ }
#line 90 "init.pgc"
/* exec sql whenever sqlerror do fc ( \"50\" ) ; */
#line 91 "init.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 92 "init.pgc"
-if (sqlca.sqlcode < 0) fc ( "50" );}
+ if (sqlca.sqlcode < 0)
+ fc("50");
+ }
#line 92 "init.pgc"
/* exec sql whenever sqlerror do fd ( \"50\" , 1 ) ; */
#line 93 "init.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 94 "init.pgc"
-if (sqlca.sqlcode < 0) fd ( "50" , 1 );}
+ if (sqlca.sqlcode < 0)
+ fd("50", 1);
+ }
#line 94 "init.pgc"
/* exec sql whenever sqlerror do fe ( ENUM0 ) ; */
#line 95 "init.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 96 "init.pgc"
-if (sqlca.sqlcode < 0) fe ( ENUM0 );}
+ if (sqlca.sqlcode < 0)
+ fe(ENUM0);
+ }
#line 96 "init.pgc"
/* exec sql whenever sqlerror do sqlnotice ( NULL , 0 ) ; */
#line 97 "init.pgc"
-
- { ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select now () ", ECPGt_EOIT, ECPGt_EORT);
#line 98 "init.pgc"
-if (sqlca.sqlcode < 0) sqlnotice ( NULL , 0 );}
+ if (sqlca.sqlcode < 0)
+ sqlnotice(NULL, 0);
+ }
#line 98 "init.pgc"
return 0;
#line 4 "type.pgc"
-typedef long mmInteger ;
+typedef long mmInteger;
#line 6 "type.pgc"
#line 6 "type.pgc"
-typedef char mmChar ;
+typedef char mmChar;
#line 7 "type.pgc"
#line 7 "type.pgc"
-typedef short mmSmallInt ;
+typedef short mmSmallInt;
#line 8 "type.pgc"
#line 8 "type.pgc"
-/* exec sql type string is char [ 11 ] */
+/* exec sql type string is char [ 11 ] */
#line 10 "type.pgc"
typedef char string[11];
-/* exec sql type c is char reference */
+/* exec sql type c is char reference */
#line 13 "type.pgc"
-typedef char* c;
+typedef char *c;
/* exec sql begin declare section */
-
-
-
-
-struct TBempl {
+
+
+
+
+struct TBempl
+{
#line 19 "type.pgc"
- mmInteger idnum ;
-
+ mmInteger idnum;
+
#line 20 "type.pgc"
- mmChar name [ 21 ] ;
-
+ mmChar name[21];
+
#line 21 "type.pgc"
- mmSmallInt accs ;
- } ;/* exec sql end declare section */
+ mmSmallInt accs;
+}; /* exec sql end declare section */
+
#line 23 "type.pgc"
int
-main (void)
+main(void)
{
- /* exec sql begin declare section */
-
-
-
-
-
-
-
-
-
+ /* exec sql begin declare section */
+
+
+
+
+
+
+
+
+
#line 29 "type.pgc"
- struct TBempl empl ;
-
+ struct TBempl empl;
+
#line 30 "type.pgc"
- string str ;
-
+ string str;
+
#line 31 "type.pgc"
- c ptr = NULL ;
-
+ c ptr = NULL;
+
#line 36 "type.pgc"
- struct varchar_vc {
+ struct varchar_vc
+ {
#line 34 "type.pgc"
- int len ;
-
+ int len;
+
#line 35 "type.pgc"
- char text [ 10 ] ;
- } vc ;
+ char text [10];
+ } vc;
+
/* exec sql end declare section */
#line 37 "type.pgc"
- /* exec sql var vc is [ 10 ] */
+ /* exec sql var vc is [ 10 ] */
#line 39 "type.pgc"
- ECPGdebug (1, stderr);
+ ECPGdebug(1, stderr);
- empl.idnum = 1;
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ empl.idnum = 1;
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 43 "type.pgc"
- if (sqlca.sqlcode)
- {
- printf ("connect error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("connect error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
- { ECPGdo(__LINE__, 0, 1, NULL, "create table empl ( idnum integer , name char ( 20 ) , accs smallint , string1 char ( 10 ) , string2 char ( 10 ) , string3 char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table empl ( idnum integer , name char ( 20 ) , accs smallint , string1 char ( 10 ) , string2 char ( 10 ) , string3 char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 51 "type.pgc"
- if (sqlca.sqlcode)
- {
- printf ("create error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("create error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into empl values ( 1 , 'user name' , 320 , 'first str' , 'second str' , 'third str' ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into empl values ( 1 , 'user name' , 320 , 'first str' , 'second str' , 'third str' ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 58 "type.pgc"
- if (sqlca.sqlcode)
- {
- printf ("insert error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
-
- { ECPGdo(__LINE__, 0, 1, NULL, "select idnum , name , accs , string1 , string2 , string3 from empl where idnum = ? ",
- ECPGt_long,&(empl.idnum),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_long,&(empl.idnum),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,&(empl.name),(long)21,(long)1,(21)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(empl.accs),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(str),(long)11,(long)1,(11)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,&(ptr),(long)0,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_varchar,&(vc),(long)10,(long)1,sizeof(struct varchar_vc),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ if (sqlca.sqlcode)
+ {
+ printf("insert error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select idnum , name , accs , string1 , string2 , string3 from empl where idnum = ? ",
+ ECPGt_long, &(empl.idnum), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_long, &(empl.idnum), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, &(empl.name), (long) 21, (long) 1, (21) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(empl.accs), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (str), (long) 11, (long) 1, (11) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, &(ptr), (long) 0, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_varchar, &(vc), (long) 10, (long) 1, sizeof(struct varchar_vc),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 68 "type.pgc"
- if (sqlca.sqlcode)
- {
- printf ("select error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
- printf ("id=%ld name='%s' accs=%d str='%s' ptr='%s' vc='%10.10s'\n", empl.idnum, empl.name, empl.accs, str, ptr, vc.text);
+ if (sqlca.sqlcode)
+ {
+ printf("select error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
+ printf("id=%ld name='%s' accs=%d str='%s' ptr='%s' vc='%10.10s'\n", empl.idnum, empl.name, empl.accs, str, ptr, vc.text);
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 76 "type.pgc"
- free(ptr);
- exit (0);
+ free(ptr);
+ exit(0);
}
#line 6 "variable.pgc"
-/* exec sql type c is char reference */
+/* exec sql type c is char reference */
#line 8 "variable.pgc"
-typedef char* c;
+typedef char *c;
-/* exec sql type ind is union {
+/* exec sql type ind is union {
#line 11 "variable.pgc"
- int integer ;
-
+ int integer ;
+
#line 11 "variable.pgc"
- short smallint ;
- } */
+ short smallint ;
+ } */
#line 11 "variable.pgc"
-typedef union { int integer; short smallint; } ind;
+typedef union
+{
+ int integer;
+ short smallint;
+} ind;
#define BUFFERSIZ 8
-/* exec sql type str is [ BUFFERSIZ ] */
+/* exec sql type str is [ BUFFERSIZ ] */
#line 15 "variable.pgc"
-/* declare cur cursor for select name , born , age , married , children from family */
+/* declare cur cursor for select name , born , age , married , children from family */
#line 18 "variable.pgc"
int
-main (void)
+main(void)
{
- struct birthinfo {
+ struct birthinfo
+ {
#line 23 "variable.pgc"
- long born ;
-
+ long born;
+
#line 23 "variable.pgc"
- short age ;
- } ;
+ short age;
+ };
+
#line 23 "variable.pgc"
/* exec sql begin declare section */
-
-
-
-
-
-
-
+
+
+
+
+
+
+
#line 27 "variable.pgc"
- struct personal_struct {
+ struct personal_struct
+ {
#line 25 "variable.pgc"
- struct varchar_name { int len; char arr[ BUFFERSIZ ]; } name ;
-
+ struct varchar_name
+ {
+ int len;
+ char arr[BUFFERSIZ];
+ } name;
+
#line 26 "variable.pgc"
- struct birthinfo birth ;
- } personal , * p ;
-
+ struct birthinfo birth;
+ } personal, *p;
+
#line 30 "variable.pgc"
- struct personal_indicator {
+ struct personal_indicator
+ {
#line 28 "variable.pgc"
- int ind_name ;
-
+ int ind_name;
+
#line 29 "variable.pgc"
- struct birthinfo ind_birth ;
- } ind_personal , * i ;
-
+ struct birthinfo ind_birth;
+ } ind_personal, *i;
+
#line 31 "variable.pgc"
- ind ind_children ;
+ ind ind_children;
+
/* exec sql end declare section */
#line 32 "variable.pgc"
-
+
#line 34 "variable.pgc"
- char * married = NULL ;
+ char *married = NULL;
#line 34 "variable.pgc"
-
+
#line 35 "variable.pgc"
- long ind_married ;
+ long ind_married;
#line 35 "variable.pgc"
-
+
#line 36 "variable.pgc"
- ind children ;
+ ind children;
#line 36 "variable.pgc"
- char msg[128];
+ char msg[128];
- ECPGdebug(1, stderr);
+ ECPGdebug(1, stderr);
strcpy(msg, "connect");
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 43 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 43 "variable.pgc"
strcpy(msg, "set");
- { ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to iso", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to iso", ECPGt_EOIT, ECPGt_EORT);
#line 46 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 46 "variable.pgc"
strcpy(msg, "create");
- { ECPGdo(__LINE__, 0, 1, NULL, "create table family ( name char ( 8 ) , born integer , age smallint , married date , children integer ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table family ( name char ( 8 ) , born integer , age smallint , married date , children integer ) ", ECPGt_EOIT, ECPGt_EORT);
#line 49 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 49 "variable.pgc"
strcpy(msg, "insert");
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , married , children ) values ( 'Mum' , '19870714' , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , married , children ) values ( 'Mum' , '19870714' , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 52 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 52 "variable.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , born , married , children ) values ( 'Dad' , '19610721' , '19870714' , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , born , married , children ) values ( 'Dad' , '19610721' , '19870714' , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 53 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 53 "variable.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 1' , 16 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 1' , 16 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 54 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 54 "variable.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 2' , 14 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 2' , 14 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 55 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 55 "variable.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 3' , 9 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into family ( name , age ) values ( 'Child 3' , 9 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 56 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 56 "variable.pgc"
strcpy(msg, "commit");
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 59 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 59 "variable.pgc"
strcpy(msg, "open");
- { ECPGdo(__LINE__, 0, 1, NULL, "declare cur cursor for select name , born , age , married , children from family ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare cur cursor for select name , born , age , married , children from family ", ECPGt_EOIT, ECPGt_EORT);
#line 62 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 62 "variable.pgc"
- /* exec sql whenever not found break ; */
+ /* exec sql whenever not found break ; */
#line 64 "variable.pgc"
- p=&personal;
- i=&ind_personal;
+ p = &personal;
+ i = &ind_personal;
memset(i, 0, sizeof(ind_personal));
- while (1) {
+ while (1)
+ {
strcpy(msg, "fetch");
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch cur", ECPGt_EOIT,
- ECPGt_varchar,&(p->name),(long)BUFFERSIZ,(long)1,sizeof(struct varchar_name),
- ECPGt_int,&(i->ind_name),(long)1,(long)1,sizeof(int),
- ECPGt_long,&(p->birth.born),(long)1,(long)1,sizeof(long),
- ECPGt_long,&(i->ind_birth.born),(long)1,(long)1,sizeof(long),
- ECPGt_short,&(p->birth.age),(long)1,(long)1,sizeof(short),
- ECPGt_short,&(i->ind_birth.age),(long)1,(long)1,sizeof(short),
- ECPGt_char,&(married),(long)0,(long)1,(1)*sizeof(char),
- ECPGt_long,&(ind_married),(long)1,(long)1,sizeof(long),
- ECPGt_int,&(children.integer),(long)1,(long)1,sizeof(int),
- ECPGt_short,&(ind_children.smallint),(long)1,(long)1,sizeof(short), ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch cur", ECPGt_EOIT,
+ ECPGt_varchar, &(p->name), (long) BUFFERSIZ, (long) 1, sizeof(struct varchar_name),
+ ECPGt_int, &(i->ind_name), (long) 1, (long) 1, sizeof(int),
+ ECPGt_long, &(p->birth.born), (long) 1, (long) 1, sizeof(long),
+ ECPGt_long, &(i->ind_birth.born), (long) 1, (long) 1, sizeof(long),
+ ECPGt_short, &(p->birth.age), (long) 1, (long) 1, sizeof(short),
+ ECPGt_short, &(i->ind_birth.age), (long) 1, (long) 1, sizeof(short),
+ ECPGt_char, &(married), (long) 0, (long) 1, (1) * sizeof(char),
+ ECPGt_long, &(ind_married), (long) 1, (long) 1, sizeof(long),
+ ECPGt_int, &(children.integer), (long) 1, (long) 1, sizeof(int),
+ ECPGt_short, &(ind_children.smallint), (long) 1, (long) 1, sizeof(short), ECPGt_EORT);
#line 71 "variable.pgc"
-if (sqlca.sqlcode == ECPG_NOT_FOUND) break;
+ if (sqlca.sqlcode == ECPG_NOT_FOUND)
+ break;
#line 71 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 71 "variable.pgc"
printf("%8.8s", personal.name.arr);
}
strcpy(msg, "close");
- { ECPGdo(__LINE__, 0, 1, NULL, "close cur", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close cur", ECPGt_EOIT, ECPGt_EORT);
#line 88 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 88 "variable.pgc"
strcpy(msg, "drop");
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table family ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table family ", ECPGt_EOIT, ECPGt_EORT);
#line 91 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 91 "variable.pgc"
strcpy(msg, "commit");
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 94 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 94 "variable.pgc"
- strcpy(msg, "disconnect");
- { ECPGdisconnect(__LINE__, "CURRENT");
+ strcpy(msg, "disconnect");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 97 "variable.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 97 "variable.pgc"
#line 5 "whenever.pgc"
-static void print(char *msg)
+static void
+print(char *msg)
{
- fprintf(stderr, "Error in statement '%s':\n", msg);
- sqlprint();
+ fprintf(stderr, "Error in statement '%s':\n", msg);
+ sqlprint();
}
-static void print2(void)
+static void
+print2(void)
{
- fprintf(stderr, "Found another error\n");
- sqlprint();
+ fprintf(stderr, "Found another error\n");
+ sqlprint();
}
-static void warn(void)
+static void
+warn(void)
{
- fprintf(stderr, "Warning: At least one column was truncated\n");
+ fprintf(stderr, "Warning: At least one column was truncated\n");
}
-int main(void)
+int
+main(void)
{
-
+
#line 26 "whenever.pgc"
- int i ;
+ int i;
#line 26 "whenever.pgc"
-
+
#line 27 "whenever.pgc"
- char c [ 6 ] ;
+ char c[6];
#line 27 "whenever.pgc"
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 31 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 31 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( i int , c char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( i int , c char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 32 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values ( 1 , 'abcdefghij' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values ( 1 , 'abcdefghij' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 33 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "whenever.pgc"
/* exec sql whenever sql_warning do warn ( ) ; */
#line 35 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(c),(long)6,(long)1,(6)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (c), (long) 6, (long) 1, (6) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 36 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 36 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 37 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 37 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 39 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 39 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 40 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 40 "whenever.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 40 "whenever.pgc"
/* exec sql whenever sqlerror do print ( \"select\" ) ; */
#line 42 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 43 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 43 "whenever.pgc"
-if (sqlca.sqlcode < 0) print ( "select" );}
+ if (sqlca.sqlcode < 0)
+ print("select");
+ }
#line 43 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 44 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 44 "whenever.pgc"
-if (sqlca.sqlcode < 0) print ( "select" );}
+ if (sqlca.sqlcode < 0)
+ print("select");
+ }
#line 44 "whenever.pgc"
/* exec sql whenever sqlerror call print2 ( ) ; */
#line 46 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 47 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 47 "whenever.pgc"
-if (sqlca.sqlcode < 0) print2 ( );}
+ if (sqlca.sqlcode < 0)
+ print2();
+ }
#line 47 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 48 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 48 "whenever.pgc"
-if (sqlca.sqlcode < 0) print2 ( );}
+ if (sqlca.sqlcode < 0)
+ print2();
+ }
#line 48 "whenever.pgc"
/* exec sql whenever sqlerror continue ; */
#line 50 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 51 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );}
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
+ }
#line 51 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 52 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );}
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
+ }
#line 52 "whenever.pgc"
/* exec sql whenever sqlerror goto error ; */
#line 54 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 55 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 55 "whenever.pgc"
-if (sqlca.sqlcode < 0) goto error;}
+ if (sqlca.sqlcode < 0)
+ goto error;
+ }
#line 55 "whenever.pgc"
printf("Should not be reachable\n");
- error:
- { ECPGtrans(__LINE__, NULL, "rollback");
+error:
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 59 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 59 "whenever.pgc"
-if (sqlca.sqlcode < 0) goto error;}
+ if (sqlca.sqlcode < 0)
+ goto error;
+ }
#line 59 "whenever.pgc"
/* exec sql whenever sqlerror stop ; */
#line 61 "whenever.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from nonexistant ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 62 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 62 "whenever.pgc"
-if (sqlca.sqlcode < 0) exit (1);}
+ if (sqlca.sqlcode < 0)
+ exit(1);
+ }
#line 62 "whenever.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 63 "whenever.pgc"
-if (sqlca.sqlwarn[0] == 'W') warn ( );
+ if (sqlca.sqlwarn[0] == 'W')
+ warn();
#line 63 "whenever.pgc"
-if (sqlca.sqlcode < 0) exit (1);}
+ if (sqlca.sqlcode < 0)
+ exit(1);
+ }
#line 63 "whenever.pgc"
- exit (0);
-}
+ exit(0);
+}
#ifdef __cplusplus
}
#endif
-
#endif
#line 7 "array.pgc"
int
-main (void)
+main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
-
+
+
+
+
+
+
#line 14 "array.pgc"
- int i = 1 ;
-
+ int i = 1;
+
#line 15 "array.pgc"
- int * did = & i ;
-
+ int *did = &i;
+
#line 16 "array.pgc"
- int a [ 10 ] = { 9 , 8 , 7 , 6 , 5 , 4 , 3 , 2 , 1 , 0 } ;
-
+ int a[10] = {9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
+
#line 17 "array.pgc"
- char text [ 25 ] = "klmnopqrst" ;
-
+ char text [25] = "klmnopqrst";
+
#line 18 "array.pgc"
- char * t = ( char * ) malloc ( 11 ) ;
-
+ char *t = (char *) malloc(11);
+
#line 19 "array.pgc"
- double f ;
+ double f;
+
/* exec sql end declare section */
#line 20 "array.pgc"
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 27 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 27 "array.pgc"
- { ECPGsetcommit(__LINE__, "on", NULL);
+ {
+ ECPGsetcommit(__LINE__, "on", NULL);
#line 29 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "array.pgc"
- { ECPGtrans(__LINE__, NULL, "begin transaction ");
+ {
+ ECPGtrans(__LINE__, NULL, "begin transaction ");
#line 31 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 31 "array.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( f float , i int , a int [ 10 ] , text char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( f float , i int , a int [ 10 ] , text char ( 10 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 33 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "array.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 404.90 , 3 , '{0,1,2,3,4,5,6,7,8,9}' , 'abcdefghij' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 404.90 , 3 , '{0,1,2,3,4,5,6,7,8,9}' , 'abcdefghij' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 35 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "array.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 140787.0 , 2 , ? , ? ) ",
- ECPGt_int,(a),(long)1,(long)10,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(text),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 140787.0 , 2 , ? , ? ) ",
+ ECPGt_int, (a), (long) 1, (long) 10, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (text), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 37 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "array.pgc"
-
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 14.07 , ? , ? , ? ) ",
- ECPGt_int,&(did),(long)1,(long)0,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,(a),(long)1,(long)10,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,&(t),(long)0,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( f , i , a , text ) values ( 14.07 , ? , ? , ? ) ",
+ ECPGt_int, &(did), (long) 1, (long) 0, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, (a), (long) 1, (long) 10, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, &(t), (long) 0, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 39 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "array.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 41 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 41 "array.pgc"
- { ECPGtrans(__LINE__, NULL, "begin transaction ");
+ {
+ ECPGtrans(__LINE__, NULL, "begin transaction ");
#line 43 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 43 "array.pgc"
-
- { ECPGdo(__LINE__, 0, 1, NULL, "select f , text from test where i = 1 ", ECPGt_EOIT,
- ECPGt_double,&(f),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(text),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select f , text from test where i = 1 ", ECPGt_EOIT,
+ ECPGt_double, &(f), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (text), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 48 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 48 "array.pgc"
printf("Found f=%f text=%10.10s\n", f, text);
- f=140787;
- { ECPGdo(__LINE__, 0, 1, NULL, "select a , text from test where f = ? ",
- ECPGt_double,&(f),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_int,(a),(long)1,(long)10,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,&(t),(long)0,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ f = 140787;
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select a , text from test where f = ? ",
+ ECPGt_double, &(f), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_int, (a), (long) 1, (long) 10, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, &(t), (long) 0, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 56 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 56 "array.pgc"
printf("Found text=%10.10s\n", t);
- { ECPGdo(__LINE__, 0, 1, NULL, "select a from test where f = ? ",
- ECPGt_double,&(f),(long)1,(long)1,sizeof(double),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_char,(text),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select a from test where f = ? ",
+ ECPGt_double, &(f), (long) 1, (long) 1, sizeof(double),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_char, (text), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 66 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 66 "array.pgc"
printf("Found text=%s\n", text);
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
#line 70 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 70 "array.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 72 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 72 "array.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 74 "array.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 74 "array.pgc"
/* exec sql begin declare section */
-
-
-
-
-
-struct TBempl {
+
+
+
+
+
+struct TBempl
+{
#line 9 "binary.pgc"
- long idnum ;
-
+ long idnum;
+
#line 10 "binary.pgc"
- char name [ 21 ] ;
-
+ char name[21];
+
#line 11 "binary.pgc"
- short accs ;
-
+ short accs;
+
#line 12 "binary.pgc"
- char byte [ 20 ] ;
- } ;/* exec sql end declare section */
+ char byte[20];
+}; /* exec sql end declare section */
+
#line 14 "binary.pgc"
int
-main (void)
+main(void)
{
- /* exec sql begin declare section */
-
-
-
+ /* exec sql begin declare section */
+
+
+
#line 20 "binary.pgc"
- struct TBempl empl ;
-
+ struct TBempl empl;
+
#line 21 "binary.pgc"
- char * data = "\\001\\155\\000\\212" ;
+ char *data = "\\001\\155\\000\\212";
+
/* exec sql end declare section */
#line 22 "binary.pgc"
- int i;
+ int i;
- ECPGdebug (1, stderr);
+ ECPGdebug(1, stderr);
- empl.idnum = 1;
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ empl.idnum = 1;
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 28 "binary.pgc"
- if (sqlca.sqlcode)
- {
- printf ("connect error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("connect error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
- { ECPGdo(__LINE__, 0, 1, NULL, "create table empl ( idnum integer , name char ( 20 ) , accs smallint , byte bytea ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table empl ( idnum integer , name char ( 20 ) , accs smallint , byte bytea ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 36 "binary.pgc"
- if (sqlca.sqlcode)
- {
- printf ("create error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
-
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into empl values ( 1 , 'first user' , 320 , ? ) ",
- ECPGt_char,&(data),(long)0,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);}
+ if (sqlca.sqlcode)
+ {
+ printf("create error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into empl values ( 1 , 'first user' , 320 , ? ) ",
+ ECPGt_char, &(data), (long) 0, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ }
#line 43 "binary.pgc"
- if (sqlca.sqlcode)
- {
- printf ("insert error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("insert error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
- /* declare C cursor for select name , accs , byte from empl where idnum = ? */
+ /*
+ * declare C cursor for select name , accs , byte from empl where
+ * idnum = ?
+ */
#line 50 "binary.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare C cursor for select name , accs , byte from empl where idnum = ? ",
- ECPGt_long,&(empl.idnum),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare C cursor for select name , accs , byte from empl where idnum = ? ",
+ ECPGt_long, &(empl.idnum), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ }
#line 51 "binary.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch C", ECPGt_EOIT,
- ECPGt_char,(empl.name),(long)21,(long)1,(21)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(empl.accs),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(empl.byte),(long)20,(long)1,(20)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch C", ECPGt_EOIT,
+ ECPGt_char, (empl.name), (long) 21, (long) 1, (21) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(empl.accs), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (empl.byte), (long) 20, (long) 1, (20) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 52 "binary.pgc"
- if (sqlca.sqlcode)
- {
- printf ("fetch error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("fetch error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
+
+ printf("name=%s, accs=%d byte=%s\n", empl.name, empl.accs, empl.byte);
- printf ("name=%s, accs=%d byte=%s\n", empl.name, empl.accs, empl.byte);
+ memset(empl.name, 0, 21L);
+ memset(empl.byte, '#', 20L);
- memset(empl.name, 0, 21L);
- memset(empl.byte, '#', 20L);
- /* declare B binary cursor for select name , accs , byte from empl where idnum = ? */
+ /*
+ * declare B binary cursor for select name , accs , byte from empl
+ * where idnum = ?
+ */
#line 63 "binary.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare B binary cursor for select name , accs , byte from empl where idnum = ? ",
- ECPGt_long,&(empl.idnum),(long)1,(long)1,sizeof(long),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare B binary cursor for select name , accs , byte from empl where idnum = ? ",
+ ECPGt_long, &(empl.idnum), (long) 1, (long) 1, sizeof(long),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ }
#line 64 "binary.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch B", ECPGt_EOIT,
- ECPGt_char,(empl.name),(long)21,(long)1,(21)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_short,&(empl.accs),(long)1,(long)1,sizeof(short),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(empl.byte),(long)20,(long)1,(20)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch B", ECPGt_EOIT,
+ ECPGt_char, (empl.name), (long) 21, (long) 1, (21) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_short, &(empl.accs), (long) 1, (long) 1, sizeof(short),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (empl.byte), (long) 20, (long) 1, (20) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 65 "binary.pgc"
- if (sqlca.sqlcode)
- {
- printf ("fetch error = %ld\n", sqlca.sqlcode);
- exit (sqlca.sqlcode);
- }
+ if (sqlca.sqlcode)
+ {
+ printf("fetch error = %ld\n", sqlca.sqlcode);
+ exit(sqlca.sqlcode);
+ }
- { ECPGdo(__LINE__, 0, 1, NULL, "close B", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close B", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 72 "binary.pgc"
- /* do not print a.accs because big/little endian will have different outputs here */
- printf ("name=%s, byte=", empl.name);
- for (i=0; i<20; i++)
- {
- if (empl.byte[i] == '#')
- break;
- printf("(%o)", (unsigned char)empl.byte[i]);
- }
- printf("\n");
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ /*
+ * do not print a.accs because big/little endian will have different
+ * outputs here
+ */
+ printf("name=%s, byte=", empl.name);
+ for (i = 0; i < 20; i++)
+ {
+ if (empl.byte[i] == '#')
+ break;
+ printf("(%o)", (unsigned char) empl.byte[i]);
+ }
+ printf("\n");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 83 "binary.pgc"
- exit (0);
+ exit(0);
}
#ifdef __cplusplus
}
#endif
-
#endif
#line 1 "code100.pgc"
-int main(int argc, char **argv)
-{ /* exec sql begin declare section */
-
-
+int
+main(int argc, char **argv)
+{ /* exec sql begin declare section */
+
+
#line 9 "code100.pgc"
- int index ;
+ int index;
+
/* exec sql end declare section */
#line 10 "code100.pgc"
- ECPGdebug(1,stderr);
-
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ ECPGdebug(1, stderr);
+
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 15 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( \"index\" numeric ( 3 ) primary key , \"payload\" int4 not null ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( \"index\" numeric ( 3 ) primary key , \"payload\" int4 not null ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 20 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
- { ECPGtrans(__LINE__, NULL, "commit");}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 22 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
- for (index=0;index<10;++index)
- { { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( payload , index ) values( 0 , ? ) ",
- ECPGt_int,&(index),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+
+ for (index = 0; index < 10; ++index)
+ {
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( payload , index ) values( 0 , ? ) ",
+ ECPGt_int, &(index), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ }
#line 28 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
- }
- { ECPGtrans(__LINE__, NULL, "commit");}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+ }
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 31 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
- { ECPGdo(__LINE__, 0, 1, NULL, "update test set payload = payload + 1 where index = - 1 ", ECPGt_EOIT, ECPGt_EORT);}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "update test set payload = payload + 1 where index = - 1 ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 35 "code100.pgc"
- if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
- { ECPGdo(__LINE__, 0, 1, NULL, "delete from test where index = - 1 ", ECPGt_EOIT, ECPGt_EORT);}
+ if (sqlca.sqlcode != 100)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "delete from test where index = - 1 ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 38 "code100.pgc"
- if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
+ if (sqlca.sqlcode != 100)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( select * from test where index = - 1 ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( select * from test where index = - 1 ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 41 "code100.pgc"
- if (sqlca.sqlcode!=100) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
+ if (sqlca.sqlcode != 100)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 44 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
- { ECPGtrans(__LINE__, NULL, "commit");}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 46 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
-
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 49 "code100.pgc"
- if (sqlca.sqlcode) printf("%ld:%s\n",sqlca.sqlcode,sqlca.sqlerrm.sqlerrmc);
- return 0;
+ if (sqlca.sqlcode)
+ printf("%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+ return 0;
}
#ifdef __cplusplus
}
#endif
-
#endif
#line 3 "copystdout.pgc"
int
-main ()
+main()
{
/*
EXEC SQL BEGIN DECLARE SECTION;
EXEC SQL END DECLARE SECTION;
*/
- ECPGdebug (1, stderr);
+ ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 19 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 19 "copystdout.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table foo ( a int , b varchar ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table foo ( a int , b varchar ) ", ECPGt_EOIT, ECPGt_EORT);
#line 20 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 20 "copystdout.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 5 , 'abc' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 5 , 'abc' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 21 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 21 "copystdout.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 6 , 'def' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 6 , 'def' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 22 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 22 "copystdout.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 7 , 'ghi' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into foo values( 7 , 'ghi' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 23 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 23 "copystdout.pgc"
- /* produces expected file "/tmp/foo" */
- /* EXEC SQL COPY foo TO:fname WITH DELIMITER ','; */
- /* printf ("copy to /tmp/foo : sqlca.sqlcode = %ld", sqlca.sqlcode); */
+ /* produces expected file "/tmp/foo" */
+ /* EXEC SQL COPY foo TO:fname WITH DELIMITER ','; */
+ /* printf ("copy to /tmp/foo : sqlca.sqlcode = %ld", sqlca.sqlcode); */
- { ECPGdo(__LINE__, 0, 1, NULL, "copy foo to stdout with delimiter ','", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "copy foo to stdout with delimiter ','", ECPGt_EOIT, ECPGt_EORT);
#line 29 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "copystdout.pgc"
- printf ("copy to STDOUT : sqlca.sqlcode = %ld\n", sqlca.sqlcode);
+ printf("copy to STDOUT : sqlca.sqlcode = %ld\n", sqlca.sqlcode);
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 32 "copystdout.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "copystdout.pgc"
- return 0;
+ return 0;
}
#ifdef __cplusplus
}
#endif
-
#endif
#line 1 "define.pgc"
-int main(void)
+int
+main(void)
{
- /* exec sql begin declare section */
-
-
-
+ /* exec sql begin declare section */
+
+
+
#line 10 "define.pgc"
- int i ;
-
+ int i;
+
#line 11 "define.pgc"
- char s [ 200 ] ;
+ char s[200];
+
/* exec sql end declare section */
#line 12 "define.pgc"
- ECPGdebug(1, stderr);
+ ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do sqlprint ( ) ; */
+ /* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 16 "define.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 17 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 17 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 19 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 19 "define.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 29 , 'abcdef' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 29 , 'abcdef' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 20 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 20 "define.pgc"
-
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'defined' ) ", ECPGt_EOIT, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'defined' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 23 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 23 "define.pgc"
-
-
-
-
-
-
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'someothervar not defined' ) ", ECPGt_EOIT, ECPGt_EORT);
+
+
+
+
+
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'someothervar not defined' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 31 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 31 "define.pgc"
-
-
- { ECPGdo(__LINE__, 0, 1, NULL, "select 1 , 29 :: text || '-' || 'abcdef' ", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(s),(long)200,(long)1,(200)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select 1 , 29 :: text || '-' || 'abcdef' ", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (s), (long) 200, (long) 1, (200) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 36 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "define.pgc"
- printf("i: %d, s: %s\n", i, s);
+ printf("i: %d, s: %s\n", i, s);
+
-
-
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 29 , 'no string' ) ", ECPGt_EOIT, ECPGt_EORT);
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 29 , 'no string' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 42 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 42 "define.pgc"
-
- /* no value */
-
-
-
-
-
-
- { ECPGdo(__LINE__, 0, 1, NULL, "set TIMEZONE to 'UTC'", ECPGt_EOIT, ECPGt_EORT);
+ /* no value */
+
+
+
+
+
+
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set TIMEZONE to 'UTC'", ECPGt_EOIT, ECPGt_EORT);
#line 53 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 53 "define.pgc"
-
- { ECPGdisconnect(__LINE__, "CURRENT");
+
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 56 "define.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 56 "define.pgc"
- return 0;
+ return 0;
}
main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
+
+
#line 8 "desc.pgc"
- char * stmt1 = "INSERT INTO test1 VALUES (?, ?)" ;
-
+ char *stmt1 = "INSERT INTO test1 VALUES (?, ?)";
+
#line 9 "desc.pgc"
- char * stmt2 = "SELECT * from test1 where a = ? and b = ?" ;
-
+ char *stmt2 = "SELECT * from test1 where a = ? and b = ?";
+
#line 10 "desc.pgc"
- char * stmt3 = "SELECT * from test1 where a = ?" ;
-
+ char *stmt3 = "SELECT * from test1 where a = ?";
+
#line 12 "desc.pgc"
- int val1 = 1 ;
-
+ int val1 = 1;
+
#line 13 "desc.pgc"
- char val2 [ 4 ] = "one" , val2output [] = "AAA" ;
-
+ char val2[4] = "one",
+ val2output[] = "AAA";
+
#line 14 "desc.pgc"
- int val1output = 2 , val2i = 0 ;
-
+ int val1output = 2,
+ val2i = 0;
+
#line 15 "desc.pgc"
- int val2null = - 1 ;
-
+ int val2null = -1;
+
#line 16 "desc.pgc"
- int ind1 , ind2 ;
+ int ind1,
+ ind2;
+
/* exec sql end declare section */
#line 17 "desc.pgc"
ECPGallocate_desc(__LINE__, "indesc");
#line 21 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 21 "desc.pgc"
ECPGallocate_desc(__LINE__, "outdesc");
#line 22 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 22 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 1,ECPGd_data,
- ECPGt_int,&(val1),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 1, ECPGd_data,
+ ECPGt_int, &(val1), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 24 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 2,ECPGd_data,
- ECPGt_char,(val2),(long)4,(long)1,(4)*sizeof(char), ECPGd_indicator,
- ECPGt_int,&(val2i),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 2, ECPGd_data,
+ ECPGt_char, (val2), (long) 4, (long) 1, (4) * sizeof(char), ECPGd_indicator,
+ ECPGt_int, &(val2i), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 25 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 25 "desc.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 27 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 27 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test1 ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test1 ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 29 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "desc.pgc"
- { ECPGprepare(__LINE__, "foo1" , stmt1);
+ {
+ ECPGprepare(__LINE__, "foo1", stmt1);
#line 30 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 30 "desc.pgc"
- { ECPGprepare(__LINE__, "foo2" , stmt2);
+ {
+ ECPGprepare(__LINE__, "foo2", stmt2);
#line 31 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 31 "desc.pgc"
- { ECPGprepare(__LINE__, "foo3" , stmt3);
+ {
+ ECPGprepare(__LINE__, "foo3", stmt3);
#line 32 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(ECPGprepared_statement("foo1")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo1")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 34 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 1,ECPGd_data,
- ECPGt_const,"2",(long)1,(long)1,strlen("2"), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 1, ECPGd_data,
+ ECPGt_const, "2", (long) 1, (long) 1, strlen("2"), ECPGd_EODT);
#line 36 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 2,ECPGd_data,
- ECPGt_char,(val2),(long)4,(long)1,(4)*sizeof(char), ECPGd_indicator,
- ECPGt_int,&(val2null),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 2, ECPGd_data,
+ ECPGt_char, (val2), (long) 4, (long) 1, (4) * sizeof(char), ECPGd_indicator,
+ ECPGt_int, &(val2null), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 37 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(ECPGprepared_statement("foo1")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo1")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 39 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 1,ECPGd_data,
- ECPGt_const,"3",(long)1,(long)1,strlen("3"), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 1, ECPGd_data,
+ ECPGt_const, "3", (long) 1, (long) 1, strlen("3"), ECPGd_EODT);
#line 41 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 41 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 2,ECPGd_data,
- ECPGt_const,"this is a long test",(long)19,(long)1,strlen("this is a long test"), ECPGd_indicator,
- ECPGt_int,&(val1),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 2, ECPGd_data,
+ ECPGt_const, "this is a long test", (long) 19, (long) 1, strlen("this is a long test"), ECPGd_indicator,
+ ECPGt_int, &(val1), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 42 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 42 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(ECPGprepared_statement("foo1")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo1")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 44 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 44 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 1,ECPGd_data,
- ECPGt_int,&(val1),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 1, ECPGd_data,
+ ECPGt_int, &(val1), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 46 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 46 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 2,ECPGd_data,
- ECPGt_char,(val2),(long)4,(long)1,(4)*sizeof(char), ECPGd_indicator,
- ECPGt_int,&(val2i),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 2, ECPGd_data,
+ ECPGt_char, (val2), (long) 4, (long) 1, (4) * sizeof(char), ECPGd_indicator,
+ ECPGt_int, &(val2i), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 47 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 47 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(ECPGprepared_statement("foo2")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_descriptor, "outdesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo2")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_descriptor, "outdesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 49 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 49 "desc.pgc"
- { ECPGget_desc(__LINE__, "outdesc", 1,ECPGd_data,
- ECPGt_char,(val2output),(long)sizeof("AAA"),(long)1,(sizeof("AAA"))*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "outdesc", 1, ECPGd_data,
+ ECPGt_char, (val2output), (long) sizeof("AAA"), (long) 1, (sizeof("AAA")) *sizeof(char), ECPGd_EODT);
#line 51 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 51 "desc.pgc"
printf("output = %s\n", val2output);
/* declare c1 cursor for ? */
#line 54 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare c1 cursor for ?",
- ECPGt_char_variable,(ECPGprepared_statement("foo2")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare c1 cursor for ?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo2")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 55 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 55 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch next from c1", ECPGt_EOIT,
- ECPGt_int,&(val1output),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(ind1),(long)1,(long)1,sizeof(int),
- ECPGt_char,(val2output),(long)sizeof("AAA"),(long)1,(sizeof("AAA"))*sizeof(char),
- ECPGt_int,&(ind2),(long)1,(long)1,sizeof(int), ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch next from c1", ECPGt_EOIT,
+ ECPGt_int, &(val1output), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(ind1), (long) 1, (long) 1, sizeof(int),
+ ECPGt_char, (val2output), (long) sizeof("AAA"), (long) 1, (sizeof("AAA")) *sizeof(char),
+ ECPGt_int, &(ind2), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
#line 57 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 57 "desc.pgc"
printf("val1=%d (ind1: %d) val2=%s (ind2: %d)\n",
- val1output, ind1, val2output, ind2);
+ val1output, ind1, val2output, ind2);
- { ECPGdo(__LINE__, 0, 1, NULL, "close c1", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close c1", ECPGt_EOIT, ECPGt_EORT);
#line 61 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 61 "desc.pgc"
- { ECPGset_desc_header(__LINE__, "indesc", (int)(1));
+ {
+ ECPGset_desc_header(__LINE__, "indesc", (int) (1));
#line 63 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 63 "desc.pgc"
- { ECPGset_desc(__LINE__, "indesc", 1,ECPGd_data,
- ECPGt_const,"2",(long)1,(long)1,strlen("2"), ECPGd_EODT);
+ {
+ ECPGset_desc(__LINE__, "indesc", 1, ECPGd_data,
+ ECPGt_const, "2", (long) 1, (long) 1, strlen("2"), ECPGd_EODT);
#line 64 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 64 "desc.pgc"
/* declare c2 cursor for ? */
#line 66 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare c2 cursor for ?",
- ECPGt_char_variable,(ECPGprepared_statement("foo3")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_descriptor, "indesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare c2 cursor for ?",
+ ECPGt_char_variable, (ECPGprepared_statement("foo3")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_descriptor, "indesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 67 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 67 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch next from c2", ECPGt_EOIT,
- ECPGt_int,&(val1output),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(val2output),(long)sizeof("AAA"),(long)1,(sizeof("AAA"))*sizeof(char),
- ECPGt_int,&(val2i),(long)1,(long)1,sizeof(int), ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch next from c2", ECPGt_EOIT,
+ ECPGt_int, &(val1output), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (val2output), (long) sizeof("AAA"), (long) 1, (sizeof("AAA")) *sizeof(char),
+ ECPGt_int, &(val2i), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
#line 69 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 69 "desc.pgc"
printf("val1=%d val2=%s\n", val1output, val2i ? "null" : val2output);
- { ECPGdo(__LINE__, 0, 1, NULL, "close c2", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close c2", ECPGt_EOIT, ECPGt_EORT);
#line 72 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 72 "desc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from test1 where a = 3 ", ECPGt_EOIT,
- ECPGt_int,&(val1output),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(val2output),(long)sizeof("AAA"),(long)1,(sizeof("AAA"))*sizeof(char),
- ECPGt_int,&(val2i),(long)1,(long)1,sizeof(int), ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from test1 where a = 3 ", ECPGt_EOIT,
+ ECPGt_int, &(val1output), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (val2output), (long) sizeof("AAA"), (long) 1, (sizeof("AAA")) *sizeof(char),
+ ECPGt_int, &(val2i), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
#line 74 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 74 "desc.pgc"
printf("val1=%d val2=%c%c%c%c warn=%c truncate=%d\n", val1output, val2output[0], val2output[1], val2output[2], val2output[3], sqlca.sqlwarn[0], val2i);
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test1 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test1 ", ECPGt_EOIT, ECPGt_EORT);
#line 77 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 77 "desc.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 78 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 78 "desc.pgc"
ECPGdeallocate_desc(__LINE__, "indesc");
#line 80 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 80 "desc.pgc"
ECPGdeallocate_desc(__LINE__, "outdesc");
#line 81 "desc.pgc"
-if (sqlca.sqlcode < 0) sqlprint();
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 81 "desc.pgc"
#ifdef __cplusplus
}
#endif
-
#endif
#line 2 "dynalloc.pgc"
#line 4 "dynalloc.pgc"
-int main(void)
+int
+main(void)
{
- /* exec sql begin declare section */
-
-
-
-
-
-
-
-/* char **d8=0; */
-
-
-
-
-
-
-
-
-/* int *i8=0; */
-
-
+ /* exec sql begin declare section */
+
+
+
+
+
+
+
+/* char **d8=0; */
+
+
+
+
+
+
+
+
+/* int *i8=0; */
+
+
#line 9 "dynalloc.pgc"
- int * d1 = 0 ;
-
+ int *d1 = 0;
+
#line 10 "dynalloc.pgc"
- double * d2 = 0 ;
-
+ double *d2 = 0;
+
#line 11 "dynalloc.pgc"
- char ** d3 = 0 ;
-
+ char **d3 = 0;
+
#line 12 "dynalloc.pgc"
- char ** d4 = 0 ;
-
+ char **d4 = 0;
+
#line 13 "dynalloc.pgc"
- char ** d5 = 0 ;
-
+ char **d5 = 0;
+
#line 14 "dynalloc.pgc"
- char ** d6 = 0 ;
-
+ char **d6 = 0;
+
#line 15 "dynalloc.pgc"
- char ** d7 = 0 ;
-
+ char **d7 = 0;
+
#line 17 "dynalloc.pgc"
- char ** d9 = 0 ;
-
+ char **d9 = 0;
+
#line 18 "dynalloc.pgc"
- int * i1 = 0 ;
-
+ int *i1 = 0;
+
#line 19 "dynalloc.pgc"
- int * i2 = 0 ;
-
+ int *i2 = 0;
+
#line 20 "dynalloc.pgc"
- int * i3 = 0 ;
-
+ int *i3 = 0;
+
#line 21 "dynalloc.pgc"
- int * i4 = 0 ;
-
+ int *i4 = 0;
+
#line 22 "dynalloc.pgc"
- int * i5 = 0 ;
-
+ int *i5 = 0;
+
#line 23 "dynalloc.pgc"
- int * i6 = 0 ;
-
+ int *i6 = 0;
+
#line 24 "dynalloc.pgc"
- int * i7 = 0 ;
-
+ int *i7 = 0;
+
#line 26 "dynalloc.pgc"
- int * i9 = 0 ;
+ int *i9 = 0;
+
/* exec sql end declare section */
#line 27 "dynalloc.pgc"
- int i;
+ int i;
- ECPGdebug(1, stderr);
+ ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do sqlprint ( ) ; */
+ /* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 32 "dynalloc.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 33 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "dynalloc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to mdy", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to mdy", ECPGt_EOIT, ECPGt_EORT);
#line 35 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "dynalloc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a serial , b numeric ( 12 , 3 ) , c varchar , d varchar ( 3 ) , e char ( 4 ) , f timestamptz , g boolean , h box , i inet ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a serial , b numeric ( 12 , 3 ) , c varchar , d varchar ( 3 ) , e char ( 4 ) , f timestamptz , g boolean , h box , i inet ) ", ECPGt_EOIT, ECPGt_EORT);
#line 37 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "dynalloc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( b , c , d , e , f , g , h , i ) values( 23.456 , 'varchar' , 'v' , 'c' , '2003-03-03 12:33:07 PDT' , true , '(1,2,3,4)' , '2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( b , c , d , e , f , g , h , i ) values( 23.456 , 'varchar' , 'v' , 'c' , '2003-03-03 12:33:07 PDT' , true , '(1,2,3,4)' , '2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 38 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 38 "dynalloc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( b , c , d , e , f , g , h , i ) values( 2.446456 , null , 'v' , 'c' , '2003-03-03 12:33:07 PDT' , false , null , null ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( b , c , d , e , f , g , h , i ) values( 2.446456 , null , 'v' , 'c' , '2003-03-03 12:33:07 PDT' , false , null , null ) ", ECPGt_EOIT, ECPGt_EORT);
#line 39 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "dynalloc.pgc"
- ECPGallocate_desc(__LINE__, "mydesc");
+ ECPGallocate_desc(__LINE__, "mydesc");
#line 41 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 41 "dynalloc.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select a , b , c , d , e , f , g , h , i from test order by a", ECPGt_EOIT,
- ECPGt_descriptor, "mydesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select a , b , c , d , e , f , g , h , i from test order by a", ECPGt_EOIT,
+ ECPGt_descriptor, "mydesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 42 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 42 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 1,ECPGd_indicator,
- ECPGt_int,&(i1),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_int,&(d1),(long)1,(long)0,sizeof(int), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 1, ECPGd_indicator,
+ ECPGt_int, &(i1), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_int, &(d1), (long) 1, (long) 0, sizeof(int), ECPGd_EODT);
#line 43 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 43 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 2,ECPGd_indicator,
- ECPGt_int,&(i2),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_double,&(d2),(long)1,(long)0,sizeof(double), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 2, ECPGd_indicator,
+ ECPGt_int, &(i2), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_double, &(d2), (long) 1, (long) 0, sizeof(double), ECPGd_EODT);
#line 44 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 44 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 3,ECPGd_indicator,
- ECPGt_int,&(i3),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d3),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 3, ECPGd_indicator,
+ ECPGt_int, &(i3), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d3), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 45 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 45 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 4,ECPGd_indicator,
- ECPGt_int,&(i4),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d4),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 4, ECPGd_indicator,
+ ECPGt_int, &(i4), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d4), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 46 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 46 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 5,ECPGd_indicator,
- ECPGt_int,&(i5),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d5),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 5, ECPGd_indicator,
+ ECPGt_int, &(i5), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d5), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 47 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 47 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 6,ECPGd_indicator,
- ECPGt_int,&(i6),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d6),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 6, ECPGd_indicator,
+ ECPGt_int, &(i6), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d6), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 48 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 48 "dynalloc.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 7,ECPGd_indicator,
- ECPGt_int,&(i7),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d7),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 7, ECPGd_indicator,
+ ECPGt_int, &(i7), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d7), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 49 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 49 "dynalloc.pgc"
- /* skip box for now */
- /* exec sql get descriptor mydesc value 8 :d8=DATA, :i8=INDICATOR; */
- { ECPGget_desc(__LINE__, "mydesc", 9,ECPGd_indicator,
- ECPGt_int,&(i9),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(d9),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ /* skip box for now */
+ /* exec sql get descriptor mydesc value 8 :d8=DATA, :i8=INDICATOR; */
+ {
+ ECPGget_desc(__LINE__, "mydesc", 9, ECPGd_indicator,
+ ECPGt_int, &(i9), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(d9), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 52 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 52 "dynalloc.pgc"
- printf("Result:\n");
- for (i=0;i<sqlca.sqlerrd[2];++i)
- {
- if (i1[i]) printf("NULL, ");
- else printf("%d, ",d1[i]);
-
- if (i2[i]) printf("NULL, ");
- else printf("%f, ",d2[i]);
-
- if (i3[i]) printf("NULL, ");
- else printf("'%s', ",d3[i]);
-
- if (i4[i]) printf("NULL, ");
- else printf("'%s', ",d4[i]);
-
- if (i5[i]) printf("NULL, ");
- else printf("'%s', ",d5[i]);
-
- if (i6[i]) printf("NULL, ");
- else printf("'%s', ",d6[i]);
-
- if (i7[i]) printf("NULL, ");
- else printf("'%s', ",d7[i]);
-
- if (i9[i]) printf("NULL, ");
- else printf("'%s', ",d9[i]);
-
- printf("\n");
- }
- ECPGfree_auto_mem();
- printf("\n");
-
- ECPGdeallocate_desc(__LINE__, "mydesc");
+ printf("Result:\n");
+ for (i = 0; i < sqlca.sqlerrd[2]; ++i)
+ {
+ if (i1[i])
+ printf("NULL, ");
+ else
+ printf("%d, ", d1[i]);
+
+ if (i2[i])
+ printf("NULL, ");
+ else
+ printf("%f, ", d2[i]);
+
+ if (i3[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d3[i]);
+
+ if (i4[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d4[i]);
+
+ if (i5[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d5[i]);
+
+ if (i6[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d6[i]);
+
+ if (i7[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d7[i]);
+
+ if (i9[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", d9[i]);
+
+ printf("\n");
+ }
+ ECPGfree_auto_mem();
+ printf("\n");
+
+ ECPGdeallocate_desc(__LINE__, "mydesc");
#line 86 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 86 "dynalloc.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 87 "dynalloc.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 87 "dynalloc.pgc"
- return 0;
+ return 0;
}
#ifdef __cplusplus
}
#endif
-
#endif
#line 2 "dynalloc2.pgc"
#line 4 "dynalloc2.pgc"
-int main(void)
+int
+main(void)
{
- /* exec sql begin declare section */
-
-
-
-
-
-
+ /* exec sql begin declare section */
+
+
+
+
+
+
#line 9 "dynalloc2.pgc"
- int * ip1 = 0 ;
-
+ int *ip1 = 0;
+
#line 10 "dynalloc2.pgc"
- char ** cp2 = 0 ;
-
+ char **cp2 = 0;
+
#line 11 "dynalloc2.pgc"
- int * ipointer1 = 0 ;
-
+ int *ipointer1 = 0;
+
#line 12 "dynalloc2.pgc"
- int * ipointer2 = 0 ;
-
+ int *ipointer2 = 0;
+
#line 13 "dynalloc2.pgc"
- int colnum ;
+ int colnum;
+
/* exec sql end declare section */
#line 14 "dynalloc2.pgc"
- int i;
+ int i;
- ECPGdebug(1, stderr);
+ ECPGdebug(1, stderr);
- /* exec sql whenever sqlerror do sqlprint ( ) ; */
+ /* exec sql whenever sqlerror do sqlprint ( ) ; */
#line 19 "dynalloc2.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 20 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 20 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to postgres", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to postgres", ECPGt_EOIT, ECPGt_EORT);
#line 22 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 22 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 24 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 1 , 'one' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 1 , 'one' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 25 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 25 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 2 , 'two' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 2 , 'two' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 26 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 26 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'three' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , 'three' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 27 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 27 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 4 , 'four' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 4 , 'four' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 28 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 28 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 5 , null ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( 5 , null ) ", ECPGt_EOIT, ECPGt_EORT);
#line 29 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , null ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test values( null , null ) ", ECPGt_EOIT, ECPGt_EORT);
#line 30 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 30 "dynalloc2.pgc"
- ECPGallocate_desc(__LINE__, "mydesc");
+ ECPGallocate_desc(__LINE__, "mydesc");
#line 32 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 32 "dynalloc2.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
- ECPGt_descriptor, "mydesc", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from test ", ECPGt_EOIT,
+ ECPGt_descriptor, "mydesc", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 33 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "dynalloc2.pgc"
- { ECPGget_desc_header(__LINE__, "mydesc", &(colnum));
+ {
+ ECPGget_desc_header(__LINE__, "mydesc", &(colnum));
#line 34 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "dynalloc2.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 1,ECPGd_indicator,
- ECPGt_int,&(ipointer1),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_int,&(ip1),(long)1,(long)0,sizeof(int), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 1, ECPGd_indicator,
+ ECPGt_int, &(ipointer1), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_int, &(ip1), (long) 1, (long) 0, sizeof(int), ECPGd_EODT);
#line 35 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "dynalloc2.pgc"
- { ECPGget_desc(__LINE__, "mydesc", 2,ECPGd_indicator,
- ECPGt_int,&(ipointer2),(long)1,(long)0,sizeof(int), ECPGd_data,
- ECPGt_char,&(cp2),(long)0,(long)0,(1)*sizeof(char), ECPGd_EODT);
+ {
+ ECPGget_desc(__LINE__, "mydesc", 2, ECPGd_indicator,
+ ECPGt_int, &(ipointer2), (long) 1, (long) 0, sizeof(int), ECPGd_data,
+ ECPGt_char, &(cp2), (long) 0, (long) 0, (1) * sizeof(char), ECPGd_EODT);
#line 36 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "dynalloc2.pgc"
- printf("Result (%d columns):\n", colnum);
- for (i=0;i < sqlca.sqlerrd[2];++i)
- {
- if (ipointer1[i]) printf("NULL, ");
- else printf("%d, ",ip1[i]);
-
- if (ipointer2[i]) printf("NULL, ");
- else printf("'%s', ",cp2[i]);
- printf("\n");
- }
- ECPGfree_auto_mem();
- printf("\n");
-
- ECPGdeallocate_desc(__LINE__, "mydesc");
+ printf("Result (%d columns):\n", colnum);
+ for (i = 0; i < sqlca.sqlerrd[2]; ++i)
+ {
+ if (ipointer1[i])
+ printf("NULL, ");
+ else
+ printf("%d, ", ip1[i]);
+
+ if (ipointer2[i])
+ printf("NULL, ");
+ else
+ printf("'%s', ", cp2[i]);
+ printf("\n");
+ }
+ ECPGfree_auto_mem();
+ printf("\n");
+
+ ECPGdeallocate_desc(__LINE__, "mydesc");
#line 51 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );
+ if (sqlca.sqlcode < 0)
+ sqlprint();
#line 51 "dynalloc2.pgc"
- { ECPGtrans(__LINE__, NULL, "rollback");
+ {
+ ECPGtrans(__LINE__, NULL, "rollback");
#line 52 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 52 "dynalloc2.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 53 "dynalloc2.pgc"
-if (sqlca.sqlcode < 0) sqlprint ( );}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 53 "dynalloc2.pgc"
- return 0;
+ return 0;
}
SQL3_DDT_ILLEGAL /* not a datetime data type (not part of
* standard) */
};
-
#endif /* !_ECPG_SQL3TYPES_H */
#line 7 "dyntest.pgc"
#ifdef __cplusplus
}
#endif
-
#endif
#line 8 "dyntest.pgc"
static void
-error (void)
+error(void)
{
- printf ("\n#%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
- exit (1);
+ printf("\n#%ld:%s\n", sqlca.sqlcode, sqlca.sqlerrm.sqlerrmc);
+ exit(1);
}
int
-main (int argc, char **argv)
+main(int argc, char **argv)
{
- /* exec sql begin declare section */
-
-
-
-
-
-
-
-
-
-
-
+ /* exec sql begin declare section */
+
+
+
+
+
+
+
+
+
+
+
#line 22 "dyntest.pgc"
- int COUNT ;
-
+ int COUNT;
+
#line 23 "dyntest.pgc"
- int INTVAR ;
-
+ int INTVAR;
+
#line 24 "dyntest.pgc"
- int INDEX ;
-
+ int INDEX;
+
#line 25 "dyntest.pgc"
- int INDICATOR ;
-
+ int INDICATOR;
+
#line 26 "dyntest.pgc"
- int TYPE , LENGTH , OCTET_LENGTH , PRECISION , SCALE , RETURNED_OCTET_LENGTH ;
-
+ int TYPE,
+ LENGTH,
+ OCTET_LENGTH,
+ PRECISION,
+ SCALE,
+ RETURNED_OCTET_LENGTH;
+
#line 27 "dyntest.pgc"
- int DATETIME_INTERVAL_CODE ;
-
+ int DATETIME_INTERVAL_CODE;
+
#line 28 "dyntest.pgc"
- char NAME [ 120 ] , BOOLVAR ;
-
+ char NAME[120],
+ BOOLVAR;
+
#line 29 "dyntest.pgc"
- char STRINGVAR [ 1024 ] ;
-
+ char STRINGVAR[1024];
+
#line 30 "dyntest.pgc"
- double DOUBLEVAR ;
-
+ double DOUBLEVAR;
+
#line 31 "dyntest.pgc"
- char * QUERY ;
+ char *QUERY;
+
/* exec sql end declare section */
#line 32 "dyntest.pgc"
- int done = 0;
+ int done = 0;
- /* exec sql var BOOLVAR is bool */
+ /* exec sql var BOOLVAR is bool */
#line 35 "dyntest.pgc"
- ECPGdebug (1, stderr);
+ ECPGdebug(1, stderr);
- QUERY = "select * from dyntest";
+ QUERY = "select * from dyntest";
- /* exec sql whenever sqlerror do error ( ) ; */
+ /* exec sql whenever sqlerror do error ( ) ; */
#line 43 "dyntest.pgc"
- ECPGallocate_desc(__LINE__, "MYDESC");
+ ECPGallocate_desc(__LINE__, "MYDESC");
#line 45 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );
+ if (sqlca.sqlcode < 0)
+ error();
#line 45 "dyntest.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
#line 47 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 47 "dyntest.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to german", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set datestyle to german", ECPGt_EOIT, ECPGt_EORT);
#line 49 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 49 "dyntest.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table dyntest ( name char ( 14 ) , d float8 , i int , bignumber int8 , b boolean , comment text , day date ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table dyntest ( name char ( 14 ) , d float8 , i int , bignumber int8 , b boolean , comment text , day date ) ", ECPGt_EOIT, ECPGt_EORT);
#line 53 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 53 "dyntest.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into dyntest values( 'first entry' , 14.7 , 14 , 123045607890 , true , 'The world''s most advanced open source database.' , '1987-07-14' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into dyntest values( 'first entry' , 14.7 , 14 , 123045607890 , true , 'The world''s most advanced open source database.' , '1987-07-14' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 54 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 54 "dyntest.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into dyntest values( 'second entry' , 1407.87 , 1407 , 987065403210 , false , 'The elephant never forgets.' , '1999-11-5' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into dyntest values( 'second entry' , 1407.87 , 1407 , 987065403210 , false , 'The elephant never forgets.' , '1999-11-5' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 55 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 55 "dyntest.pgc"
- { ECPGprepare(__LINE__, "MYQUERY" , QUERY);
+ {
+ ECPGprepare(__LINE__, "MYQUERY", QUERY);
#line 57 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 57 "dyntest.pgc"
- /* declare MYCURS cursor for ? */
+ /* declare MYCURS cursor for ? */
#line 58 "dyntest.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare MYCURS cursor for ?",
- ECPGt_char_variable,(ECPGprepared_statement("MYQUERY")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare MYCURS cursor for ?",
+ ECPGt_char_variable, (ECPGprepared_statement("MYQUERY")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 60 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 60 "dyntest.pgc"
- while (1)
- {
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch in MYCURS", ECPGt_EOIT,
- ECPGt_descriptor, "MYDESC", 0L, 0L, 0L,
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ while (1)
+ {
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch in MYCURS", ECPGt_EOIT,
+ ECPGt_descriptor, "MYDESC", 0L, 0L, 0L,
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 64 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 64 "dyntest.pgc"
- if (sqlca.sqlcode)
- break;
+ if (sqlca.sqlcode)
+ break;
- { ECPGget_desc_header(__LINE__, "MYDESC", &(COUNT));
+ {
+ ECPGget_desc_header(__LINE__, "MYDESC", &(COUNT));
#line 69 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 69 "dyntest.pgc"
- if (!done)
- {
- printf ("Found %d columns\n", COUNT);
- done = 1;
- }
+ if (!done)
+ {
+ printf("Found %d columns\n", COUNT);
+ done = 1;
+ }
- for (INDEX = 1; INDEX <= COUNT; ++INDEX)
- {
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_indicator,
- ECPGt_int,&(INDICATOR),(long)1,(long)1,sizeof(int), ECPGd_name,
- ECPGt_char,(NAME),(long)120,(long)1,(120)*sizeof(char), ECPGd_scale,
- ECPGt_int,&(SCALE),(long)1,(long)1,sizeof(int), ECPGd_precision,
- ECPGt_int,&(PRECISION),(long)1,(long)1,sizeof(int), ECPGd_ret_octet,
- ECPGt_int,&(RETURNED_OCTET_LENGTH),(long)1,(long)1,sizeof(int), ECPGd_octet,
- ECPGt_int,&(OCTET_LENGTH),(long)1,(long)1,sizeof(int), ECPGd_length,
- ECPGt_int,&(LENGTH),(long)1,(long)1,sizeof(int), ECPGd_type,
- ECPGt_int,&(TYPE),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ for (INDEX = 1; INDEX <= COUNT; ++INDEX)
+ {
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_indicator,
+ ECPGt_int, &(INDICATOR), (long) 1, (long) 1, sizeof(int), ECPGd_name,
+ ECPGt_char, (NAME), (long) 120, (long) 1, (120) * sizeof(char), ECPGd_scale,
+ ECPGt_int, &(SCALE), (long) 1, (long) 1, sizeof(int), ECPGd_precision,
+ ECPGt_int, &(PRECISION), (long) 1, (long) 1, sizeof(int), ECPGd_ret_octet,
+ ECPGt_int, &(RETURNED_OCTET_LENGTH), (long) 1, (long) 1, sizeof(int), ECPGd_octet,
+ ECPGt_int, &(OCTET_LENGTH), (long) 1, (long) 1, sizeof(int), ECPGd_length,
+ ECPGt_int, &(LENGTH), (long) 1, (long) 1, sizeof(int), ECPGd_type,
+ ECPGt_int, &(TYPE), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 86 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 86 "dyntest.pgc"
- printf ("%2d\t%s (type: %d length: %d precision: %d scale: %d = " , INDEX, NAME, TYPE, LENGTH, PRECISION, SCALE);
- switch (TYPE)
- {
- case SQL3_BOOLEAN:
- printf ("bool");
- break;
- case SQL3_NUMERIC:
- printf ("numeric(%d,%d)", PRECISION, SCALE);
- break;
- case SQL3_DECIMAL:
- printf ("decimal(%d,%d)", PRECISION, SCALE);
- break;
- case SQL3_INTEGER:
- printf ("integer");
- break;
- case SQL3_SMALLINT:
- printf ("smallint");
- break;
- case SQL3_FLOAT:
- printf ("float(%d,%d)", PRECISION, SCALE);
- break;
- case SQL3_REAL:
- printf ("real");
- break;
- case SQL3_DOUBLE_PRECISION:
- printf ("double precision");
- break;
- case SQL3_DATE_TIME_TIMESTAMP:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_di_code,
- ECPGt_int,&(DATETIME_INTERVAL_CODE),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ printf("%2d\t%s (type: %d length: %d precision: %d scale: %d = ", INDEX, NAME, TYPE, LENGTH, PRECISION, SCALE);
+ switch (TYPE)
+ {
+ case SQL3_BOOLEAN:
+ printf("bool");
+ break;
+ case SQL3_NUMERIC:
+ printf("numeric(%d,%d)", PRECISION, SCALE);
+ break;
+ case SQL3_DECIMAL:
+ printf("decimal(%d,%d)", PRECISION, SCALE);
+ break;
+ case SQL3_INTEGER:
+ printf("integer");
+ break;
+ case SQL3_SMALLINT:
+ printf("smallint");
+ break;
+ case SQL3_FLOAT:
+ printf("float(%d,%d)", PRECISION, SCALE);
+ break;
+ case SQL3_REAL:
+ printf("real");
+ break;
+ case SQL3_DOUBLE_PRECISION:
+ printf("double precision");
+ break;
+ case SQL3_DATE_TIME_TIMESTAMP:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_di_code,
+ ECPGt_int, &(DATETIME_INTERVAL_CODE), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 116 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 116 "dyntest.pgc"
- switch (DATETIME_INTERVAL_CODE)
- {
- case SQL3_DDT_DATE:
- printf ("date");
- break;
- case SQL3_DDT_TIME:
- printf ("time");
- break;
- case SQL3_DDT_TIMESTAMP:
- printf ("timestamp");
- break;
- case SQL3_DDT_TIME_WITH_TIME_ZONE:
- printf ("time with time zone");
- break;
- case SQL3_DDT_TIMESTAMP_WITH_TIME_ZONE:
- printf ("timestamp with time zone");
- break;
- }
- break;
- case SQL3_INTERVAL:
- printf ("interval");
- break;
- case SQL3_CHARACTER:
- if (LENGTH > 0)
- printf ("char(%d)", LENGTH);
- else
- printf ("text");
- break;
- case SQL3_CHARACTER_VARYING:
- if (LENGTH > 0)
- printf ("varchar(%d)", LENGTH);
- else
- printf ("varchar()");
- break;
- default:
- if (TYPE < 0)
- printf ("", -TYPE);
- else
- printf ("", TYPE);
- break;
- }
- printf (")\n\toctet_length: %d returned_octet_length: %d)\n\t= ",
- OCTET_LENGTH, RETURNED_OCTET_LENGTH);
- if (INDICATOR == -1)
- printf ("NULL\n");
- else
- switch (TYPE)
- {
- case SQL3_BOOLEAN:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_bool,&(BOOLVAR),(long)1,(long)1,sizeof(bool), ECPGd_EODT);
+ switch (DATETIME_INTERVAL_CODE)
+ {
+ case SQL3_DDT_DATE:
+ printf("date");
+ break;
+ case SQL3_DDT_TIME:
+ printf("time");
+ break;
+ case SQL3_DDT_TIMESTAMP:
+ printf("timestamp");
+ break;
+ case SQL3_DDT_TIME_WITH_TIME_ZONE:
+ printf("time with time zone");
+ break;
+ case SQL3_DDT_TIMESTAMP_WITH_TIME_ZONE:
+ printf("timestamp with time zone");
+ break;
+ }
+ break;
+ case SQL3_INTERVAL:
+ printf("interval");
+ break;
+ case SQL3_CHARACTER:
+ if (LENGTH > 0)
+ printf("char(%d)", LENGTH);
+ else
+ printf("text");
+ break;
+ case SQL3_CHARACTER_VARYING:
+ if (LENGTH > 0)
+ printf("varchar(%d)", LENGTH);
+ else
+ printf("varchar()");
+ break;
+ default:
+ if (TYPE < 0)
+ printf("", -TYPE);
+ else
+ printf("", TYPE);
+ break;
+ }
+ printf(")\n\toctet_length: %d returned_octet_length: %d)\n\t= ",
+ OCTET_LENGTH, RETURNED_OCTET_LENGTH);
+ if (INDICATOR == -1)
+ printf("NULL\n");
+ else
+ switch (TYPE)
+ {
+ case SQL3_BOOLEAN:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_bool, &(BOOLVAR), (long) 1, (long) 1, sizeof(bool), ECPGd_EODT);
#line 166 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 166 "dyntest.pgc"
- printf ("%s\n", BOOLVAR ? "true" : "false");
- break;
- case SQL3_INTEGER:
- case SQL3_SMALLINT:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_int,&(INTVAR),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ printf("%s\n", BOOLVAR ? "true" : "false");
+ break;
+ case SQL3_INTEGER:
+ case SQL3_SMALLINT:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_int, &(INTVAR), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 171 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 171 "dyntest.pgc"
- printf ("%d\n", INTVAR);
- break;
- case SQL3_DOUBLE_PRECISION:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_double,&(DOUBLEVAR),(long)1,(long)1,sizeof(double), ECPGd_EODT);
+ printf("%d\n", INTVAR);
+ break;
+ case SQL3_DOUBLE_PRECISION:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_double, &(DOUBLEVAR), (long) 1, (long) 1, sizeof(double), ECPGd_EODT);
#line 175 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 175 "dyntest.pgc"
- printf ("%.*f\n", PRECISION, DOUBLEVAR);
- break;
- case SQL3_DATE_TIME_TIMESTAMP:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_char,(STRINGVAR),(long)1024,(long)1,(1024)*sizeof(char), ECPGd_di_code,
- ECPGt_int,&(DATETIME_INTERVAL_CODE),(long)1,(long)1,sizeof(int), ECPGd_EODT);
+ printf("%.*f\n", PRECISION, DOUBLEVAR);
+ break;
+ case SQL3_DATE_TIME_TIMESTAMP:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_char, (STRINGVAR), (long) 1024, (long) 1, (1024) * sizeof(char), ECPGd_di_code,
+ ECPGt_int, &(DATETIME_INTERVAL_CODE), (long) 1, (long) 1, sizeof(int), ECPGd_EODT);
#line 181 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 181 "dyntest.pgc"
- printf ("%d \"%s\"\n", DATETIME_INTERVAL_CODE, STRINGVAR);
- break;
- case SQL3_CHARACTER:
- case SQL3_CHARACTER_VARYING:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_char,(STRINGVAR),(long)1024,(long)1,(1024)*sizeof(char), ECPGd_EODT);
+ printf("%d \"%s\"\n", DATETIME_INTERVAL_CODE, STRINGVAR);
+ break;
+ case SQL3_CHARACTER:
+ case SQL3_CHARACTER_VARYING:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_char, (STRINGVAR), (long) 1024, (long) 1, (1024) * sizeof(char), ECPGd_EODT);
#line 186 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 186 "dyntest.pgc"
- printf ("\"%s\"\n", STRINGVAR);
- break;
- default:
- { ECPGget_desc(__LINE__, "MYDESC", INDEX,ECPGd_data,
- ECPGt_char,(STRINGVAR),(long)1024,(long)1,(1024)*sizeof(char), ECPGd_EODT);
+ printf("\"%s\"\n", STRINGVAR);
+ break;
+ default:
+ {
+ ECPGget_desc(__LINE__, "MYDESC", INDEX, ECPGd_data,
+ ECPGt_char, (STRINGVAR), (long) 1024, (long) 1, (1024) * sizeof(char), ECPGd_EODT);
#line 190 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 190 "dyntest.pgc"
- printf ("<\"%s\">\n", STRINGVAR);
- break;
- }
+ printf("<\"%s\">\n", STRINGVAR);
+ break;
+ }
+ }
}
- }
- { ECPGdo(__LINE__, 0, 1, NULL, "close MYCURS", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close MYCURS", ECPGt_EOIT, ECPGt_EORT);
#line 197 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );}
+ if (sqlca.sqlcode < 0)
+ error();
+ }
#line 197 "dyntest.pgc"
- ECPGdeallocate_desc(__LINE__, "MYDESC");
+ ECPGdeallocate_desc(__LINE__, "MYDESC");
#line 199 "dyntest.pgc"
-if (sqlca.sqlcode < 0) error ( );
+ if (sqlca.sqlcode < 0)
+ error();
#line 199 "dyntest.pgc"
- return 0;
- }
+ return 0;
+}
main(void)
{
/* exec sql begin declare section */
-
-
-
-
-
+
+
+
+
+
#line 14 "execute.pgc"
- int amount [ 8 ] ;
-
+ int amount[8];
+
#line 15 "execute.pgc"
- int increment = 100 ;
-
+ int increment = 100;
+
#line 16 "execute.pgc"
- char name [ 8 ] [ 8 ] ;
-
+ char name[8][8];
+
#line 17 "execute.pgc"
- char letter [ 8 ] [ 1 ] ;
-
+ char letter[8][1];
+
#line 18 "execute.pgc"
- char command [ 128 ] ;
+ char command[128];
+
/* exec sql end declare section */
#line 19 "execute.pgc"
- int i,j;
+ int i,
+ j;
ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , "main", 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, "main", 0);
#line 24 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( name char ( 8 ) , amount int , letter char ( 1 ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( name char ( 8 ) , amount int , letter char ( 1 ) ) ", ECPGt_EOIT, ECPGt_EORT);
#line 25 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 25 "execute.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 26 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 26 "execute.pgc"
sprintf(command, "insert into test (name, amount, letter) values ('db: ''r1''', 1, 'f')");
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(command),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (command), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 29 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "execute.pgc"
sprintf(command, "insert into test (name, amount, letter) values ('db: ''r1''', 2, 't')");
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(command),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (command), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 32 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "execute.pgc"
sprintf(command, "insert into test (name, amount, letter) select name, amount+10, letter from test");
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(command),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (command), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 35 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "execute.pgc"
printf("Inserted %ld tuples via execute immediate\n", sqlca.sqlerrd[2]);
sprintf(command, "insert into test (name, amount, letter) select name, amount+?, letter from test");
- { ECPGprepare(__LINE__, "I" , command);
+ {
+ ECPGprepare(__LINE__, "I", command);
#line 40 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 40 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "?",
- ECPGt_char_variable,(ECPGprepared_statement("I")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(increment),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "?",
+ ECPGt_char_variable, (ECPGprepared_statement("I")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(increment), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 41 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 41 "execute.pgc"
printf("Inserted %ld tuples via prepared execute\n", sqlca.sqlerrd[2]);
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 45 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 45 "execute.pgc"
- sprintf (command, "select * from test");
+ sprintf(command, "select * from test");
- { ECPGprepare(__LINE__, "F" , command);
+ {
+ ECPGprepare(__LINE__, "F", command);
#line 49 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 49 "execute.pgc"
- /* declare CUR cursor for ? */
+ /* declare CUR cursor for ? */
#line 50 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare CUR cursor for ?",
- ECPGt_char_variable,(ECPGprepared_statement("F")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare CUR cursor for ?",
+ ECPGt_char_variable, (ECPGprepared_statement("F")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 52 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 52 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch 8 in CUR", ECPGt_EOIT,
- ECPGt_char,(name),(long)8,(long)8,(8)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,(amount),(long)1,(long)8,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(letter),(long)1,(long)8,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch 8 in CUR", ECPGt_EOIT,
+ ECPGt_char, (name), (long) 8, (long) 8, (8) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, (amount), (long) 1, (long) 8, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (letter), (long) 1, (long) 8, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 53 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 53 "execute.pgc"
- for (i=0, j=sqlca.sqlerrd[2]; i<j; i++)
+ for (i = 0, j = sqlca.sqlerrd[2]; i < j; i++)
{
/* exec sql begin declare section */
-
-
-
+
+
+
#line 58 "execute.pgc"
- char n [ 8 ] , l = letter [ i ] [ 0 ] ;
-
+ char n[8],
+ l = letter[i][0];
+
#line 59 "execute.pgc"
- int a = amount [ i ] ;
+ int a = amount[i];
+
/* exec sql end declare section */
#line 60 "execute.pgc"
printf("name[%d]=%8.8s\tamount[%d]=%d\tletter[%d]=%c\n", i, n, i, a, i, l);
}
- { ECPGdo(__LINE__, 0, 1, NULL, "close CUR", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close CUR", ECPGt_EOIT, ECPGt_EORT);
#line 66 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 66 "execute.pgc"
- sprintf (command, "select * from test where amount = ?");
+ sprintf(command, "select * from test where amount = ?");
- { ECPGprepare(__LINE__, "F" , command);
+ {
+ ECPGprepare(__LINE__, "F", command);
#line 70 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 70 "execute.pgc"
/* declare CUR2 cursor for ? */
#line 71 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare CUR2 cursor for ?",
- ECPGt_char_variable,(ECPGprepared_statement("F")),(long)1,(long)1,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_const,"1",(long)1,(long)1,strlen("1"),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare CUR2 cursor for ?",
+ ECPGt_char_variable, (ECPGprepared_statement("F")), (long) 1, (long) 1, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_const, "1", (long) 1, (long) 1, strlen("1"),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 73 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 73 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch in CUR2", ECPGt_EOIT,
- ECPGt_char,(name),(long)8,(long)8,(8)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,(amount),(long)1,(long)8,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(letter),(long)1,(long)8,(1)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch in CUR2", ECPGt_EOIT,
+ ECPGt_char, (name), (long) 8, (long) 8, (8) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, (amount), (long) 1, (long) 8, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (letter), (long) 1, (long) 8, (1) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 74 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 74 "execute.pgc"
- for (i=0, j=sqlca.sqlerrd[2]; i<j; i++)
+ for (i = 0, j = sqlca.sqlerrd[2]; i < j; i++)
{
/* exec sql begin declare section */
-
-
-
+
+
+
#line 79 "execute.pgc"
- char n [ 8 ] , l = letter [ i ] [ 0 ] ;
-
+ char n[8],
+ l = letter[i][0];
+
#line 80 "execute.pgc"
- int a = amount [ i ] ;
+ int a = amount[i];
+
/* exec sql end declare section */
#line 81 "execute.pgc"
printf("name[%d]=%8.8s\tamount[%d]=%d\tletter[%d]=%c\n", i, n, i, a, i, l);
}
- { ECPGdo(__LINE__, 0, 1, NULL, "close CUR2", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close CUR2", ECPGt_EOIT, ECPGt_EORT);
#line 87 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 87 "execute.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
#line 88 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 88 "execute.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 89 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 89 "execute.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
#line 90 "execute.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 90 "execute.pgc"
#line 5 "fetch.pgc"
-int main(int argc, char* argv[]) {
- /* exec sql begin declare section */
-
-
-
+int
+main(int argc, char *argv[])
+{
+ /* exec sql begin declare section */
+
+
+
#line 9 "fetch.pgc"
- char str [ 25 ] ;
-
+ char str[25];
+
#line 10 "fetch.pgc"
- int i , how_many = 1 ;
+ int i,
+ how_many = 1;
+
/* exec sql end declare section */
#line 11 "fetch.pgc"
- ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ ECPGdebug(1, stderr);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 14 "fetch.pgc"
- /* exec sql whenever sql_warning sqlprint ; */
+ /* exec sql whenever sql_warning sqlprint ; */
#line 16 "fetch.pgc"
- /* exec sql whenever sqlerror sqlprint ; */
+ /* exec sql whenever sqlerror sqlprint ; */
#line 17 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table My_Table ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table My_Table ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 19 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 19 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 19 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 1 , 'text1' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 1 , 'text1' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 21 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 21 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 21 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 2 , 'text2' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 2 , 'text2' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 22 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 22 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 22 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 3 , 'text3' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 3 , 'text3' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 23 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 23 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 23 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 4 , 'text4' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values ( 4 , 'text4' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 24 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 24 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "fetch.pgc"
- /* declare C cursor for select * from My_Table */
+ /* declare C cursor for select * from My_Table */
#line 26 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "declare C cursor for select * from My_Table ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "declare C cursor for select * from My_Table ", ECPGt_EOIT, ECPGt_EORT);
#line 28 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 28 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 28 "fetch.pgc"
- /* exec sql whenever not found break ; */
+ /* exec sql whenever not found break ; */
#line 30 "fetch.pgc"
- while (1) {
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch 1 in C", ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(str),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ while (1)
+ {
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch 1 in C", ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (str), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 32 "fetch.pgc"
-if (sqlca.sqlcode == ECPG_NOT_FOUND) break;
+ if (sqlca.sqlcode == ECPG_NOT_FOUND)
+ break;
#line 32 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 32 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "fetch.pgc"
- printf("%d: %s\n", i, str);
- }
+ printf("%d: %s\n", i, str);
+ }
- /* exec sql whenever not found continue ; */
+ /* exec sql whenever not found continue ; */
#line 36 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "move backward 2 in C", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "move backward 2 in C", ECPGt_EOIT, ECPGt_EORT);
#line 37 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 37 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "fetch ? in C",
- ECPGt_int,&(how_many),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT,
- ECPGt_int,&(i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_char,(str),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "fetch ? in C",
+ ECPGt_int, &(how_many), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT,
+ ECPGt_int, &(i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_char, (str), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 39 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 39 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "fetch.pgc"
- printf("%d: %s\n", i, str);
+ printf("%d: %s\n", i, str);
- { ECPGdo(__LINE__, 0, 1, NULL, "close C", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "close C", ECPGt_EOIT, ECPGt_EORT);
#line 42 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 42 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 42 "fetch.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table My_Table ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table My_Table ", ECPGt_EOIT, ECPGt_EORT);
#line 44 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 44 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 44 "fetch.pgc"
- { ECPGdisconnect(__LINE__, "ALL");
+ {
+ ECPGdisconnect(__LINE__, "ALL");
#line 46 "fetch.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 46 "fetch.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 46 "fetch.pgc"
- return 0;
+ return 0;
}
#line 5 "func.pgc"
-int main(int argc, char* argv[]) {
-
- ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+int
+main(int argc, char *argv[])
+{
+
+ ECPGdebug(1, stderr);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 10 "func.pgc"
- { ECPGsetcommit(__LINE__, "on", NULL);}
+ {
+ ECPGsetcommit(__LINE__, "on", NULL);
+ }
#line 12 "func.pgc"
- /* exec sql whenever sql_warning sqlprint ; */
+ /* exec sql whenever sql_warning sqlprint ; */
#line 13 "func.pgc"
- /* exec sql whenever sqlerror sqlprint ; */
+ /* exec sql whenever sqlerror sqlprint ; */
#line 14 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table My_Table ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table My_Table ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 16 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 16 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 16 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create function My_Table_Check () returns trigger as $test$\
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create function My_Table_Check () returns trigger as $test$\
BEGIN\
RAISE WARNING 'Notice: TG_NAME=%, TG WHEN=%', TG_NAME, TG_WHEN;\
RETURN NEW;\
END; $test$ language plpgsql", ECPGt_EOIT, ECPGt_EORT);
#line 24 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 24 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create trigger My_Table_Check_Trigger before insert on My_Table for each row execute procedure My_Table_Check ( )", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create trigger My_Table_Check_Trigger before insert on My_Table for each row execute procedure My_Table_Check ( )", ECPGt_EOIT, ECPGt_EORT);
#line 30 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 30 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 30 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values( 1234 , 'Some random text' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values( 1234 , 'Some random text' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 32 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 32 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values( 5678 , 'The Quick Brown' ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into My_Table values( 5678 , 'The Quick Brown' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 33 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 33 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 33 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop trigger My_Table_Check_Trigger on My_Table ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop trigger My_Table_Check_Trigger on My_Table ", ECPGt_EOIT, ECPGt_EORT);
#line 35 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 35 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 35 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop function My_Table_Check () ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop function My_Table_Check () ", ECPGt_EOIT, ECPGt_EORT);
#line 36 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 36 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "func.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table My_Table ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table My_Table ", ECPGt_EOIT, ECPGt_EORT);
#line 37 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 37 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 37 "func.pgc"
- { ECPGdisconnect(__LINE__, "ALL");
+ {
+ ECPGdisconnect(__LINE__, "ALL");
#line 39 "func.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 39 "func.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 39 "func.pgc"
- return 0;
+ return 0;
}
#ifdef __cplusplus
}
#endif
-
#endif
#line 3 "indicators.pgc"
-int main(int argc, char **argv)
+int
+main(int argc, char **argv)
{
/* exec sql begin declare section */
-
-
-
+
+
+
#line 10 "indicators.pgc"
- int intvar = 5 ;
-
+ int intvar = 5;
+
#line 11 "indicators.pgc"
- int nullind = - 1 ;
+ int nullind = -1;
+
/* exec sql end declare section */
#line 12 "indicators.pgc"
- ECPGdebug(1,stderr);
+ ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 16 "indicators.pgc"
- { ECPGsetcommit(__LINE__, "off", NULL);}
+ {
+ ECPGsetcommit(__LINE__, "off", NULL);
+ }
#line 17 "indicators.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( \"id\" int primary key , \"str\" text not null , val int null ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( \"id\" int primary key , \"str\" text not null , val int null ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 22 "indicators.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 23 "indicators.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 1 , 'Hello' , 0 ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 1 , 'Hello' , 0 ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 25 "indicators.pgc"
/* use indicator in insert */
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 2 , 'Hi there' , ? ) ",
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 2 , 'Hi there' , ? ) ",
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EOIT, ECPGt_EORT);
+ }
#line 28 "indicators.pgc"
nullind = 0;
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 3 , 'Good evening' , ? ) ",
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( id , str , val ) values( 3 , 'Good evening' , ? ) ",
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EOIT, ECPGt_EORT);
+ }
#line 30 "indicators.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 31 "indicators.pgc"
/* use indicators to get information about selects */
- { ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 1 ", ECPGt_EOIT,
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 1 ", ECPGt_EOIT,
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 34 "indicators.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 2 ", ECPGt_EOIT,
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 2 ", ECPGt_EOIT,
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
+ }
#line 35 "indicators.pgc"
printf("intvar: %d, nullind: %d\n", intvar, nullind);
- { ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 3 ", ECPGt_EOIT,
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 3 ", ECPGt_EOIT,
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
+ }
#line 37 "indicators.pgc"
printf("intvar: %d, nullind: %d\n", intvar, nullind);
/* use indicators for update */
- intvar = 5; nullind = -1;
- { ECPGdo(__LINE__, 0, 1, NULL, "update test set val = ? where id = 1 ",
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EOIT, ECPGt_EORT);}
+ intvar = 5;
+ nullind = -1;
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "update test set val = ? where id = 1 ",
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EOIT, ECPGt_EORT);
+ }
#line 42 "indicators.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 1 ", ECPGt_EOIT,
- ECPGt_int,&(intvar),(long)1,(long)1,sizeof(int),
- ECPGt_int,&(nullind),(long)1,(long)1,sizeof(int), ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select val from test where id = 1 ", ECPGt_EOIT,
+ ECPGt_int, &(intvar), (long) 1, (long) 1, sizeof(int),
+ ECPGt_int, &(nullind), (long) 1, (long) 1, sizeof(int), ECPGt_EORT);
+ }
#line 43 "indicators.pgc"
printf("intvar: %d, nullind: %d\n", intvar, nullind);
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 46 "indicators.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 47 "indicators.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 49 "indicators.pgc"
return 0;
#line 5 "quote.pgc"
-int main(int argc, char* argv[]) {
- /* exec sql begin declare section */
-
-
+int
+main(int argc, char *argv[])
+{
+ /* exec sql begin declare section */
+
+
#line 9 "quote.pgc"
- char var [ 25 ] ;
+ char var[25];
+
/* exec sql end declare section */
#line 10 "quote.pgc"
- ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ ECPGdebug(1, stderr);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 13 "quote.pgc"
- { ECPGsetcommit(__LINE__, "on", NULL);}
+ {
+ ECPGsetcommit(__LINE__, "on", NULL);
+ }
#line 15 "quote.pgc"
- /* exec sql whenever sql_warning sqlprint ; */
+ /* exec sql whenever sql_warning sqlprint ; */
#line 16 "quote.pgc"
- /* exec sql whenever sqlerror sqlprint ; */
+ /* exec sql whenever sqlerror sqlprint ; */
#line 17 "quote.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table \"My_Table\" ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table \"My_Table\" ( Item1 int , Item2 text ) ", ECPGt_EOIT, ECPGt_EORT);
#line 19 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 19 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 19 "quote.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "show standard_conforming_strings", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show standard_conforming_strings", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 21 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 21 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 21 "quote.pgc"
- printf("Standard conforming strings: %s\n", var);
+ printf("Standard conforming strings: %s\n", var);
- /* this is a\\b actually */
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
+ /* this is a\\b actually */
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 25 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 25 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 25 "quote.pgc"
- /* this is a\b */
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
+ /* this is a\b */
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 27 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 27 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 27 "quote.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "set standard_conforming_strings to on", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "set standard_conforming_strings to on", ECPGt_EOIT, ECPGt_EORT);
#line 29 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 29 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 29 "quote.pgc"
- /* this is a\\b actually */
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
+ /* this is a\\b actually */
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 32 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 32 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "quote.pgc"
- /* this is a\b */
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
+ /* this is a\b */
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into \"My_Table\" values ( 1 , 'a\\\\b' ) ", ECPGt_EOIT, ECPGt_EORT);
#line 34 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 34 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 34 "quote.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select * from \"My_Table\" ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select * from \"My_Table\" ", ECPGt_EOIT, ECPGt_EORT);
#line 36 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 36 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "quote.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table \"My_Table\" ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table \"My_Table\" ", ECPGt_EOIT, ECPGt_EORT);
#line 38 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 38 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 38 "quote.pgc"
- { ECPGdisconnect(__LINE__, "ALL");
+ {
+ ECPGdisconnect(__LINE__, "ALL");
#line 40 "quote.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 40 "quote.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 40 "quote.pgc"
- return 0;
+ return 0;
}
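
The if (sqlca.sqlwarn[0] == 'W') and if (sqlca.sqlcode < 0) checks appended to every statement above are what the two WHENEVER directives (left behind as comments by the preprocessor) expand to. A hedged sketch of the corresponding source form, illustrative only:

    EXEC SQL WHENEVER SQLWARNING SQLPRINT;
    EXEC SQL WHENEVER SQLERROR SQLPRINT;

    /* from here on, ecpg appends the two generated checks after each statement:
     *   if (sqlca.sqlwarn[0] == 'W') sqlprint();
     *   if (sqlca.sqlcode < 0)       sqlprint();
     */
    EXEC SQL CREATE TABLE "My_Table" (Item1 int, Item2 text);
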
#line 5 "show.pgc"
-int main(int argc, char* argv[]) {
- /* exec sql begin declare section */
-
-
+int
+main(int argc, char *argv[])
+{
+ /* exec sql begin declare section */
+
+
#line 9 "show.pgc"
- char var [ 25 ] ;
+ char var[25];
+
/* exec sql end declare section */
#line 10 "show.pgc"
- ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ ECPGdebug(1, stderr);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 13 "show.pgc"
- /* exec sql whenever sql_warning sqlprint ; */
+ /* exec sql whenever sql_warning sqlprint ; */
#line 15 "show.pgc"
- /* exec sql whenever sqlerror sqlprint ; */
+ /* exec sql whenever sqlerror sqlprint ; */
#line 16 "show.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "show search_path", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show search_path", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 18 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 18 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 18 "show.pgc"
- printf("Var: Search path: %s\n", var);
+ printf("Var: Search path: %s\n", var);
- { ECPGdo(__LINE__, 0, 1, NULL, "show wal_buffers", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show wal_buffers", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 21 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 21 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 21 "show.pgc"
- printf("Var: WAL buffers: %s\n", var);
+ printf("Var: WAL buffers: %s\n", var);
- { ECPGdo(__LINE__, 0, 1, NULL, "show standard_conforming_strings", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show standard_conforming_strings", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 24 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 24 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "show.pgc"
- printf("Var: Standard conforming strings: %s\n", var);
+ printf("Var: Standard conforming strings: %s\n", var);
- { ECPGdo(__LINE__, 0, 1, NULL, "show time zone", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show time zone", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 27 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 27 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 27 "show.pgc"
- printf("Time Zone: %s\n", var);
+ printf("Time Zone: %s\n", var);
- { ECPGdo(__LINE__, 0, 1, NULL, "show transaction isolation level", ECPGt_EOIT,
- ECPGt_char,(var),(long)25,(long)1,(25)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "show transaction isolation level", ECPGt_EOIT,
+ ECPGt_char, (var), (long) 25, (long) 1, (25) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 30 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 30 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 30 "show.pgc"
- printf("Transaction isolation level: %s\n", var);
+ printf("Transaction isolation level: %s\n", var);
- /* Do not ask for the user name, it may differ in a regression test */
- /* EXEC SQL SHOW SESSION AUTHORIZATION INTO :var; */
+ /* Do not ask for the user name, it may differ in a regression test */
+ /* EXEC SQL SHOW SESSION AUTHORIZATION INTO :var; */
- { ECPGdisconnect(__LINE__, "ALL");
+ {
+ ECPGdisconnect(__LINE__, "ALL");
#line 36 "show.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 36 "show.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 36 "show.pgc"
- return 0;
+ return 0;
}
#line 5 "update.pgc"
-int main(int argc, char* argv[]) {
- /* exec sql begin declare section */
-
-
+int
+main(int argc, char *argv[])
+{
+ /* exec sql begin declare section */
+
+
#line 9 "update.pgc"
- int i1 [ 3 ] , i2 [ 3 ] ;
+ int i1[3],
+ i2[3];
+
/* exec sql end declare section */
#line 10 "update.pgc"
- ECPGdebug(1, stderr);
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ ECPGdebug(1, stderr);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 13 "update.pgc"
- /* exec sql whenever sql_warning sqlprint ; */
+ /* exec sql whenever sql_warning sqlprint ; */
#line 15 "update.pgc"
- /* exec sql whenever sqlerror sqlprint ; */
+ /* exec sql whenever sqlerror sqlprint ; */
#line 16 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b int ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test ( a int , b int ) ", ECPGt_EOIT, ECPGt_EORT);
#line 18 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 18 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 18 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 1 , 1 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 1 , 1 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 20 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 20 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 20 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 2 , 2 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 2 , 2 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 21 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 21 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 21 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 3 , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test ( a , b ) values( 3 , 3 ) ", ECPGt_EOIT, ECPGt_EORT);
#line 22 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 22 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 22 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "update test set a = a + 1 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "update test set a = a + 1 ", ECPGt_EOIT, ECPGt_EORT);
#line 24 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 24 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 24 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "update test set ( a , b )=( 5 , 5 ) where a = 4 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "update test set ( a , b )=( 5 , 5 ) where a = 4 ", ECPGt_EOIT, ECPGt_EORT);
#line 25 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 25 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 25 "update.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "update test set a = 4 where a = 3 ", ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "update test set a = 4 where a = 3 ", ECPGt_EOIT, ECPGt_EORT);
#line 26 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 26 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 26 "update.pgc"
-;
-
- { ECPGdo(__LINE__, 0, 1, NULL, "select a , b from test order by a", ECPGt_EOIT,
- ECPGt_int,(i1),(long)1,(long)3,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,(i2),(long)1,(long)3,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);
+ ;
+
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select a , b from test order by a", ECPGt_EOIT,
+ ECPGt_int, (i1), (long) 1, (long) 3, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, (i2), (long) 1, (long) 3, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
#line 28 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 28 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 28 "update.pgc"
- printf("test\na b\n%d %d\n%d %d\n%d %d\n", i1[0], i2[0], i1[1], i2[1], i1[2], i2[2]);
+ printf("test\na b\n%d %d\n%d %d\n%d %d\n", i1[0], i2[0], i1[1], i2[1], i1[2], i2[2]);
- { ECPGdisconnect(__LINE__, "ALL");
+ {
+ ECPGdisconnect(__LINE__, "ALL");
#line 32 "update.pgc"
-if (sqlca.sqlwarn[0] == 'W') sqlprint();
+ if (sqlca.sqlwarn[0] == 'W')
+ sqlprint();
#line 32 "update.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 32 "update.pgc"
- return 0;
+ return 0;
}
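
In the hunk above, the (long) 1, (long) 3 pair in each ECPGdo() descriptor appears to carry the element length and the array bound of the host variables. A hedged sketch of the source-level declaration and fetch that produce such descriptors, illustrative only:

    EXEC SQL BEGIN DECLARE SECTION;
    int		i1[3], i2[3];
    EXEC SQL END DECLARE SECTION;

    /* three result rows land in the three array slots of each host variable */
    EXEC SQL SELECT a, b INTO :i1, :i2 FROM test ORDER BY a;
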
#line 19 "thread.pgc"
-void *test_thread(void *arg);
+void *test_thread(void *arg);
-int nthreads = 10;
-int iterations = 20;
+int nthreads = 10;
+int iterations = 20;
-int main(int argc, char *argv[])
+int
+main(int argc, char *argv[])
{
- pthread_t *threads;
- int n;
- /* exec sql begin declare section */
-
-
+ pthread_t *threads;
+ int n;
+
+ /* exec sql begin declare section */
+
+
#line 31 "thread.pgc"
- int l_rows ;
+ int l_rows;
+
/* exec sql end declare section */
#line 32 "thread.pgc"
- /* Switch off debug output for regression tests. The threads get executed in
- * more or less random order */
- ECPGdebug(0, stderr);
+ /*
+ * Switch off debug output for regression tests. The threads get executed
+ * in more or less random order
+ */
+ ECPGdebug(0, stderr);
- /* setup test_thread table */
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ /* setup test_thread table */
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 41 "thread.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test_thread ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test_thread ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 42 "thread.pgc"
- /* DROP might fail */
- { ECPGtrans(__LINE__, NULL, "commit");}
+ /* DROP might fail */
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 43 "thread.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test_thread ( tstamp timestamp not null default cast( timeofday () as timestamp ) , thread TEXT not null , iteration integer not null , primary key( thread , iteration ) ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test_thread ( tstamp timestamp not null default cast( timeofday () as timestamp ) , thread TEXT not null , iteration integer not null , primary key( thread , iteration ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 48 "thread.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 49 "thread.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 50 "thread.pgc"
- /* create, and start, threads */
- threads = calloc(nthreads, sizeof(pthread_t));
- if( threads == NULL )
- {
- fprintf(stderr, "Cannot alloc memory\n");
- return( 1 );
- }
- for( n = 0; n < nthreads; n++ )
- {
- pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1));
- }
-
- /* wait for thread completion */
- for( n = 0; n < nthreads; n++ )
- {
- pthread_join(threads[n], NULL);
- }
- free(threads);
-
- /* and check results */
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ /* create, and start, threads */
+ threads = calloc(nthreads, sizeof(pthread_t));
+ if (threads == NULL)
+ {
+ fprintf(stderr, "Cannot alloc memory\n");
+ return (1);
+ }
+ for (n = 0; n < nthreads; n++)
+ {
+ pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1));
+ }
+
+ /* wait for thread completion */
+ for (n = 0; n < nthreads; n++)
+ {
+ pthread_join(threads[n], NULL);
+ }
+ free(threads);
+
+ /* and check results */
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 72 "thread.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select count (*) from test_thread ", ECPGt_EOIT,
- ECPGt_int,&(l_rows),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select count (*) from test_thread ", ECPGt_EOIT,
+ ECPGt_int, &(l_rows), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 73 "thread.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 74 "thread.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 75 "thread.pgc"
- if( l_rows == (nthreads * iterations) )
- printf("Success.\n");
- else
- printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows);
+ if (l_rows == (nthreads * iterations))
+ printf("Success.\n");
+ else
+ printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows);
- return( 0 );
+ return (0);
}
-void *test_thread(void *arg)
+void *
+test_thread(void *arg)
{
- long threadnum = (long)arg;
- /* exec sql begin declare section */
-
-
-
+ long threadnum = (long) arg;
+
+ /* exec sql begin declare section */
+
+
+
#line 88 "thread.pgc"
- int l_i ;
-
+ int l_i;
+
#line 89 "thread.pgc"
- char l_connection [ 128 ] ;
+ char l_connection[128];
+
/* exec sql end declare section */
#line 90 "thread.pgc"
- /* build up connection name, and connect to database */
- snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum);
- /* exec sql whenever sqlerror sqlprint ; */
+ /* build up connection name, and connect to database */
+ snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum);
+ /* exec sql whenever sqlerror sqlprint ; */
#line 94 "thread.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , l_connection, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, l_connection, 0);
#line 95 "thread.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 95 "thread.pgc"
- if( sqlca.sqlcode != 0 )
- {
- printf("%s: ERROR: cannot connect to database!\n", l_connection);
- return( NULL );
- }
- { ECPGtrans(__LINE__, l_connection, "begin transaction ");
+ if (sqlca.sqlcode != 0)
+ {
+ printf("%s: ERROR: cannot connect to database!\n", l_connection);
+ return (NULL);
+ }
+ {
+ ECPGtrans(__LINE__, l_connection, "begin transaction ");
#line 101 "thread.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 101 "thread.pgc"
- /* insert into test_thread table */
- for( l_i = 1; l_i <= iterations; l_i++ )
- {
+ /* insert into test_thread table */
+ for (l_i = 1; l_i <= iterations; l_i++)
+ {
#ifdef DEBUG
- printf("%s: inserting %d\n", l_connection, l_i);
+ printf("%s: inserting %d\n", l_connection, l_i);
#endif
- { ECPGdo(__LINE__, 0, 1, l_connection, "insert into test_thread ( thread , iteration ) values( ? , ? ) ",
- ECPGt_char,(l_connection),(long)128,(long)1,(128)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(l_i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, l_connection, "insert into test_thread ( thread , iteration ) values( ? , ? ) ",
+ ECPGt_char, (l_connection), (long) 128, (long) 1, (128) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(l_i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 109 "thread.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 109 "thread.pgc"
#ifdef DEBUG
- if( sqlca.sqlcode == 0 )
- printf("%s: insert done\n", l_connection);
- else
- printf("%s: ERROR: insert failed!\n", l_connection);
+ if (sqlca.sqlcode == 0)
+ printf("%s: insert done\n", l_connection);
+ else
+ printf("%s: ERROR: insert failed!\n", l_connection);
#endif
- }
+ }
- /* all done */
- { ECPGtrans(__LINE__, l_connection, "commit");
+ /* all done */
+ {
+ ECPGtrans(__LINE__, l_connection, "commit");
#line 119 "thread.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 119 "thread.pgc"
- { ECPGdisconnect(__LINE__, l_connection);
+ {
+ ECPGdisconnect(__LINE__, l_connection);
#line 120 "thread.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 120 "thread.pgc"
#ifdef DEBUG
- printf("%s: done!\n", l_connection);
+ printf("%s: done!\n", l_connection);
#endif
- return( NULL );
+ return (NULL);
}
-#endif /* ENABLE_THREAD_SAFETY */
+
+#endif /* ENABLE_THREAD_SAFETY */
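
In the thread.pgc hunk above, ECPGdo() receives l_connection as its connection argument; the thread_implicit.pgc hunk that follows passes NULL and relies on the current connection instead. A hedged sketch of the explicit-connection form at the source level, assuming the declarations from the surrounding test, illustrative only:

    snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum);
    EXEC SQL CONNECT TO regress1 AS :l_connection;
    EXEC SQL AT :l_connection INSERT INTO test_thread (thread, iteration)
    	VALUES (:l_connection, :l_i);
    EXEC SQL AT :l_connection COMMIT;
    EXEC SQL DISCONNECT :l_connection;
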
#line 20 "thread_implicit.pgc"
-void *test_thread(void *arg);
+void *test_thread(void *arg);
-int nthreads = 10;
-int iterations = 20;
+int nthreads = 10;
+int iterations = 20;
-int main(int argc, char *argv[])
+int
+main(int argc, char *argv[])
{
- pthread_t *threads;
- int n;
- /* exec sql begin declare section */
-
-
+ pthread_t *threads;
+ int n;
+
+ /* exec sql begin declare section */
+
+
#line 32 "thread_implicit.pgc"
- int l_rows ;
+ int l_rows;
+
/* exec sql end declare section */
#line 33 "thread_implicit.pgc"
- /* Switch off debug output for regression tests. The threads get executed in
- * more or less random order */
- ECPGdebug(0, stderr);
+ /*
+ * Switch off debug output for regression tests. The threads get executed
+ * in more or less random order
+ */
+ ECPGdebug(0, stderr);
- /* setup test_thread table */
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ /* setup test_thread table */
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 42 "thread_implicit.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "drop table test_thread ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "drop table test_thread ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 43 "thread_implicit.pgc"
- /* DROP might fail */
- { ECPGtrans(__LINE__, NULL, "commit");}
+ /* DROP might fail */
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 44 "thread_implicit.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "create table test_thread ( tstamp timestamp not null default cast( timeofday () as timestamp ) , thread TEXT not null , iteration integer not null , primary key( thread , iteration ) ) ", ECPGt_EOIT, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "create table test_thread ( tstamp timestamp not null default cast( timeofday () as timestamp ) , thread TEXT not null , iteration integer not null , primary key( thread , iteration ) ) ", ECPGt_EOIT, ECPGt_EORT);
+ }
#line 49 "thread_implicit.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 50 "thread_implicit.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 51 "thread_implicit.pgc"
- /* create, and start, threads */
- threads = calloc(nthreads, sizeof(pthread_t));
- if( threads == NULL )
- {
- fprintf(stderr, "Cannot alloc memory\n");
- return( 1 );
- }
- for( n = 0; n < nthreads; n++ )
- {
- pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1));
- }
-
- /* wait for thread completion */
- for( n = 0; n < nthreads; n++ )
- {
- pthread_join(threads[n], NULL);
- }
- free(threads);
-
- /* and check results */
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , NULL, 0); }
+ /* create, and start, threads */
+ threads = calloc(nthreads, sizeof(pthread_t));
+ if (threads == NULL)
+ {
+ fprintf(stderr, "Cannot alloc memory\n");
+ return (1);
+ }
+ for (n = 0; n < nthreads; n++)
+ {
+ pthread_create(&threads[n], NULL, test_thread, (void *) (n + 1));
+ }
+
+ /* wait for thread completion */
+ for (n = 0; n < nthreads; n++)
+ {
+ pthread_join(threads[n], NULL);
+ }
+ free(threads);
+
+ /* and check results */
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, NULL, 0);
+ }
#line 73 "thread_implicit.pgc"
- { ECPGdo(__LINE__, 0, 1, NULL, "select count (*) from test_thread ", ECPGt_EOIT,
- ECPGt_int,&(l_rows),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EORT);}
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "select count (*) from test_thread ", ECPGt_EOIT,
+ ECPGt_int, &(l_rows), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EORT);
+ }
#line 74 "thread_implicit.pgc"
- { ECPGtrans(__LINE__, NULL, "commit");}
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
+ }
#line 75 "thread_implicit.pgc"
- { ECPGdisconnect(__LINE__, "CURRENT");}
+ {
+ ECPGdisconnect(__LINE__, "CURRENT");
+ }
#line 76 "thread_implicit.pgc"
- if( l_rows == (nthreads * iterations) )
- printf("Success.\n");
- else
- printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows);
+ if (l_rows == (nthreads * iterations))
+ printf("Success.\n");
+ else
+ printf("ERROR: Failure - expecting %d rows, got %d.\n", nthreads * iterations, l_rows);
- return( 0 );
+ return (0);
}
-void *test_thread(void *arg)
+void *
+test_thread(void *arg)
{
- long threadnum = (long)arg;
- /* exec sql begin declare section */
-
-
-
+ long threadnum = (long) arg;
+
+ /* exec sql begin declare section */
+
+
+
#line 89 "thread_implicit.pgc"
- int l_i ;
-
+ int l_i;
+
#line 90 "thread_implicit.pgc"
- char l_connection [ 128 ] ;
+ char l_connection[128];
+
/* exec sql end declare section */
#line 91 "thread_implicit.pgc"
- /* build up connection name, and connect to database */
- snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum);
- /* exec sql whenever sqlerror sqlprint ; */
+ /* build up connection name, and connect to database */
+ snprintf(l_connection, sizeof(l_connection), "thread_%03ld", threadnum);
+ /* exec sql whenever sqlerror sqlprint ; */
#line 95 "thread_implicit.pgc"
- { ECPGconnect(__LINE__, 0, "regress1" , NULL,NULL , l_connection, 0);
+ {
+ ECPGconnect(__LINE__, 0, "regress1", NULL, NULL, l_connection, 0);
#line 96 "thread_implicit.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 96 "thread_implicit.pgc"
- if( sqlca.sqlcode != 0 )
- {
- printf("%s: ERROR: cannot connect to database!\n", l_connection);
- return( NULL );
- }
- { ECPGtrans(__LINE__, NULL, "begin transaction ");
+ if (sqlca.sqlcode != 0)
+ {
+ printf("%s: ERROR: cannot connect to database!\n", l_connection);
+ return (NULL);
+ }
+ {
+ ECPGtrans(__LINE__, NULL, "begin transaction ");
#line 102 "thread_implicit.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 102 "thread_implicit.pgc"
- /* insert into test_thread table */
- for( l_i = 1; l_i <= iterations; l_i++ )
- {
+ /* insert into test_thread table */
+ for (l_i = 1; l_i <= iterations; l_i++)
+ {
#ifdef DEBUG
- printf("%s: inserting %d\n", l_connection, l_i);
+ printf("%s: inserting %d\n", l_connection, l_i);
#endif
- { ECPGdo(__LINE__, 0, 1, NULL, "insert into test_thread ( thread , iteration ) values( ? , ? ) ",
- ECPGt_char,(l_connection),(long)128,(long)1,(128)*sizeof(char),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L,
- ECPGt_int,&(l_i),(long)1,(long)1,sizeof(int),
- ECPGt_NO_INDICATOR, NULL , 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
+ {
+ ECPGdo(__LINE__, 0, 1, NULL, "insert into test_thread ( thread , iteration ) values( ? , ? ) ",
+ ECPGt_char, (l_connection), (long) 128, (long) 1, (128) * sizeof(char),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L,
+ ECPGt_int, &(l_i), (long) 1, (long) 1, sizeof(int),
+ ECPGt_NO_INDICATOR, NULL, 0L, 0L, 0L, ECPGt_EOIT, ECPGt_EORT);
#line 110 "thread_implicit.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 110 "thread_implicit.pgc"
#ifdef DEBUG
- if( sqlca.sqlcode == 0 )
- printf("%s: insert done\n", l_connection);
- else
- printf("%s: ERROR: insert failed!\n", l_connection);
+ if (sqlca.sqlcode == 0)
+ printf("%s: insert done\n", l_connection);
+ else
+ printf("%s: ERROR: insert failed!\n", l_connection);
#endif
- }
+ }
- /* all done */
- { ECPGtrans(__LINE__, NULL, "commit");
+ /* all done */
+ {
+ ECPGtrans(__LINE__, NULL, "commit");
#line 120 "thread_implicit.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 120 "thread_implicit.pgc"
- { ECPGdisconnect(__LINE__, l_connection);
+ {
+ ECPGdisconnect(__LINE__, l_connection);
#line 121 "thread_implicit.pgc"
-if (sqlca.sqlcode < 0) sqlprint();}
+ if (sqlca.sqlcode < 0)
+ sqlprint();
+ }
#line 121 "thread_implicit.pgc"
#ifdef DEBUG
- printf("%s: done!\n", l_connection);
+ printf("%s: done!\n", l_connection);
#endif
- return( NULL );
+ return (NULL);
}
-#endif /* ENABLE_THREAD_SAFETY */
+
+#endif /* ENABLE_THREAD_SAFETY */
* exceed INITIAL_EXPBUFFER_SIZE (currently 256 bytes).
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.120 2006/09/22 21:39:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-auth.c,v 1.121 2006/10/04 00:30:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Various krb5 state which is not connection specific, and a flag to
* indicate whether we have initialised it yet.
*/
-/*
+/*
static int pg_krb5_initialised;
static krb5_context pg_krb5_context;
static krb5_ccache pg_krb5_ccache;
struct krb5_info
{
- int pg_krb5_initialised;
- krb5_context pg_krb5_context;
- krb5_ccache pg_krb5_ccache;
- krb5_principal pg_krb5_client;
- char *pg_krb5_name;
+ int pg_krb5_initialised;
+ krb5_context pg_krb5_context;
+ krb5_ccache pg_krb5_ccache;
+ krb5_principal pg_krb5_client;
+ char *pg_krb5_name;
};
static int
-pg_krb5_init(char *PQerrormsg, struct krb5_info *info)
+pg_krb5_init(char *PQerrormsg, struct krb5_info * info)
{
krb5_error_code retval;
return STATUS_OK;
}
-static void
-pg_krb5_destroy(struct krb5_info *info)
+static void
+pg_krb5_destroy(struct krb5_info * info)
{
krb5_free_principal(info->pg_krb5_context, info->pg_krb5_client);
krb5_cc_close(info->pg_krb5_context, info->pg_krb5_ccache);
static char *
pg_krb5_authname(char *PQerrormsg)
{
- char *tmp_name;
+ char *tmp_name;
struct krb5_info info;
+
info.pg_krb5_initialised = 0;
if (pg_krb5_init(PQerrormsg, &info) != STATUS_OK)
krb5_auth_context auth_context = NULL;
krb5_error *err_ret = NULL;
struct krb5_info info;
+
info.pg_krb5_initialised = 0;
if (!hostname)
pg_fe_getauthname(char *PQerrormsg)
{
#ifdef KRB5
- char *krb5_name = NULL;
+ char *krb5_name = NULL;
#endif
const char *name = NULL;
char *authn;
pglock_thread();
#ifdef KRB5
- /* pg_krb5_authname gives us a strdup'd value that we need
- * to free later, however, we don't want to free 'name' directly
- * in case it's *not* a Kerberos login and we fall through to
- * name = pw->pw_name; */
+
+ /*
+ * pg_krb5_authname gives us a strdup'd value that we need to free later,
+ * however, we don't want to free 'name' directly in case it's *not* a
+ * Kerberos login and we fall through to name = pw->pw_name;
+ */
krb5_name = pg_krb5_authname(PQerrormsg);
name = krb5_name;
#endif
*
* This is intended to be used by client applications that wish to send
* commands like ALTER USER joe PASSWORD 'pwd'. The password need not
- * be sent in cleartext if it is encrypted on the client side. This is
+ * be sent in cleartext if it is encrypted on the client side. This is
* good because it ensures the cleartext password won't end up in logs,
* pg_stat displays, etc. We export the function so that clients won't
 * be dependent on low-level details like whether the encryption is MD5
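
The comment above belongs to libpq's exported password-encryption helper, PQencryptPassword(). A hedged usage sketch, assuming an already-connected conn and a role name that needs no identifier quoting:

    #include <stdio.h>
    #include <libpq-fe.h>

    static int
    set_password(PGconn *conn, const char *user, const char *newpass)
    {
    	/* returns a malloc'd encrypted string, or NULL on out-of-memory */
    	char	   *encrypted = PQencryptPassword(newpass, user);
    	char		query[512];
    	PGresult   *res;
    	int			ok;

    	if (encrypted == NULL)
    		return 0;
    	snprintf(query, sizeof(query), "ALTER USER %s PASSWORD '%s'", user, encrypted);
    	res = PQexec(conn, query);
    	ok = (PQresultStatus(res) == PGRES_COMMAND_OK);
    	PQclear(res);
    	PQfreemem(encrypted);
    	return ok;
    }
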
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.336 2006/09/27 15:41:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.337 2006/10/04 00:30:12 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct timeval LDAP_TIMEVAL;
#endif
static int ldapServiceLookup(const char *purl, PQconninfoOption *options,
- PQExpBuffer errorMessage);
+ PQExpBuffer errorMessage);
#endif
#include "libpq/ip.h"
conn->sslmode = strdup(DefaultSSLMode);
/*
- * Only if we get this far is it appropriate to try to connect.
- * (We need a state flag, rather than just the boolean result of
- * this function, in case someone tries to PQreset() the PGconn.)
+ * Only if we get this far is it appropriate to try to connect. (We need a
+ * state flag, rather than just the boolean result of this function, in
+ * case someone tries to PQreset() the PGconn.)
*/
conn->options_valid = true;
ldapServiceLookup(const char *purl, PQconninfoOption *options,
PQExpBuffer errorMessage)
{
- int port = LDAP_DEF_PORT, scope, rc, msgid, size, state, oldstate, i;
+ int port = LDAP_DEF_PORT,
+ scope,
+ rc,
+ msgid,
+ size,
+ state,
+ oldstate,
+ i;
bool found_keyword;
- char *url, *hostname, *portstr, *endptr, *dn, *scopestr, *filter,
- *result, *p, *p1 = NULL, *optname = NULL, *optval = NULL;
+ char *url,
+ *hostname,
+ *portstr,
+ *endptr,
+ *dn,
+ *scopestr,
+ *filter,
+ *result,
+ *p,
+ *p1 = NULL,
+ *optname = NULL,
+ *optval = NULL;
char *attrs[2] = {NULL, NULL};
LDAP *ld = NULL;
- LDAPMessage *res, *entry;
+ LDAPMessage *res,
+ *entry;
struct berval **values;
LDAP_TIMEVAL time = {PGLDAP_TIMEOUT, 0};
}
/*
- * Parse URL components, check for correctness. Basically, url has
- * '\0' placed at component boundaries and variables are pointed
- * at each component.
+ * Parse URL components, check for correctness. Basically, url has '\0'
+ * placed at component boundaries and variables are pointed at each
+ * component.
*/
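
ldapServiceLookup() parses service-file lines of the LDAP URL form documented for libpq, roughly ldap://host[:port]/distinguished-name?attribute?scope?filter, where the hostname defaults to localhost and the port to the LDAP default when omitted. A hedged, illustrative pg_service.conf entry of that shape (the names are made up):

    [customerdb]
    ldap://ldap.example.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*)
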
if (pg_strncasecmp(url, LDAP_URL, strlen(LDAP_URL)) != 0)
/* hostname */
hostname = url + strlen(LDAP_URL);
- if (*hostname == '/') /* no hostname? */
- hostname = "localhost"; /* the default */
+ if (*hostname == '/') /* no hostname? */
+ hostname = "localhost"; /* the default */
/* dn, "distinguished name" */
- p = strchr(url + strlen(LDAP_URL), '/');
+ p = strchr(url + strlen(LDAP_URL), '/');
if (p == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": missing distinguished name\n"), purl);
+ "bad LDAP URL \"%s\": missing distinguished name\n"), purl);
free(url);
return 3;
}
- *p = '\0'; /* terminate hostname */
+ *p = '\0'; /* terminate hostname */
dn = p + 1;
/* attribute */
if ((p = strchr(dn, '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": must have exactly one attribute\n"), purl);
+ "bad LDAP URL \"%s\": must have exactly one attribute\n"), purl);
free(url);
return 3;
}
if ((p = strchr(attrs[0], '?')) == NULL || *(p + 1) == '\0' || *(p + 1) == '?')
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": must have search scope (base/one/sub)\n"), purl);
+ "bad LDAP URL \"%s\": must have search scope (base/one/sub)\n"), purl);
free(url);
return 3;
}
if (*portstr == '\0' || *endptr != '\0' || errno || lport < 0 || lport > 65535)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": invalid port number\n"), purl);
+ "bad LDAP URL \"%s\": invalid port number\n"), purl);
free(url);
return 3;
}
if (strchr(attrs[0], ',') != NULL)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": must have exactly one attribute\n"), purl);
+ "bad LDAP URL \"%s\": must have exactly one attribute\n"), purl);
free(url);
return 3;
}
else
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "bad LDAP URL \"%s\": must have search scope (base/one/sub)\n"), purl);
+ "bad LDAP URL \"%s\": must have search scope (base/one/sub)\n"), purl);
free(url);
return 3;
}
}
/*
- * Initialize connection to the server. We do an explicit bind because
- * we want to return 2 if the bind fails.
+ * Initialize connection to the server. We do an explicit bind because we
+ * want to return 2 if the bind fails.
*/
if ((msgid = ldap_simple_bind(ld, NULL, NULL)) == -1)
{
else if (ld_is_nl_cr(*p))
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "missing \"=\" after \"%s\" in connection info string\n"),
- optname);
+ "missing \"=\" after \"%s\" in connection info string\n"),
+ optname);
return 3;
}
else if (*p == '=')
else if (!ld_is_sp_tab(*p))
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "missing \"=\" after \"%s\" in connection info string\n"),
- optname);
+ "missing \"=\" after \"%s\" in connection info string\n"),
+ optname);
return 3;
}
break;
{
printfPQExpBuffer(errorMessage,
libpq_gettext("invalid connection option \"%s\"\n"),
- optname);
+ optname);
return 1;
}
optname = NULL;
if (state == 5 || state == 6)
{
printfPQExpBuffer(errorMessage, libpq_gettext(
- "unterminated quoted string in connection info string\n"));
+ "unterminated quoted string in connection info string\n"));
return 3;
}
#ifdef USE_LDAP
if (strncmp(line, "ldap", 4) == 0)
{
- int rc = ldapServiceLookup(line, options, errorMessage);
+ int rc = ldapServiceLookup(line, options, errorMessage);
+
/* if rc = 2, go on reading for fallback */
switch (rc)
{
else
{
/*
- * In protocol 2 we have to assume the setting will stick, and
- * adjust our state immediately. In protocol 3 and up we can
- * rely on the backend to report the parameter value, and we'll
- * change state at that time.
+ * In protocol 2 we have to assume the setting will stick, and adjust
+ * our state immediately. In protocol 3 and up we can rely on the
+ * backend to report the parameter value, and we'll change state at
+ * that time.
*/
if (PG_PROTOCOL_MAJOR(conn->pversion) < 3)
pqSaveParameterStatus(conn, "client_encoding", encoding);
if (hostname == NULL)
hostname = DefaultHost;
else if (is_absolute_path(hostname))
+
/*
- * We should probably use canonicalize_path(), but then
- * we have to bring path.c into libpq, and it doesn't
- * seem worth it.
+ * We should probably use canonicalize_path(), but then we have to
+ * bring path.c into libpq, and it doesn't seem worth it.
*/
if (strcmp(hostname, DEFAULT_PGSOCKET_DIR) == 0)
hostname = DefaultHost;
-
+
if (port == NULL)
port = DEF_PGPORT_STR;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.190 2006/08/18 19:52:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.191 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* values that result in backward-compatible behavior
*/
static int static_client_encoding = PG_SQL_ASCII;
-static bool static_std_strings = false;
+static bool static_std_strings = false;
static bool PQsendQueryStart(PGconn *conn);
static void parseInput(PGconn *conn);
static bool PQexecStart(PGconn *conn);
static PGresult *PQexecFinish(PGconn *conn);
-static int PQsendDescribe(PGconn *conn, char desc_type,
- const char *desc_target);
+static int PQsendDescribe(PGconn *conn, char desc_type,
+ const char *desc_target);
/* ----------------
}
/*
- * Special hacks: remember client_encoding and standard_conforming_strings,
- * and convert server version to a numeric form. We keep the first two of
- * these in static variables as well, so that PQescapeString and
- * PQescapeBytea can behave somewhat sanely (at least in single-
- * connection-using programs).
+ * Special hacks: remember client_encoding and
+ * standard_conforming_strings, and convert server version to a numeric
+ * form. We keep the first two of these in static variables as well, so
+ * that PQescapeString and PQescapeBytea can behave somewhat sanely (at
+ * least in single- connection-using programs).
*/
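
Those static copies only keep the deprecated connection-less PQescapeString()/PQescapeBytea() working; the connection-aware variants read the settings from the PGconn instead. A hedged sketch using PQescapeStringConn(), assuming conn is already connected and remembering that the output buffer must hold at least twice the input length plus one byte:

    #include <stdio.h>
    #include <string.h>
    #include <libpq-fe.h>

    static void
    print_escaped(PGconn *conn, const char *raw)
    {
    	char		escaped[2 * 64 + 1];	/* enough for inputs up to 63 bytes */
    	int			err = 0;

    	if (strlen(raw) >= 64)
    		return;				/* keep within the sketch's buffer bound */
    	PQescapeStringConn(conn, escaped, raw, strlen(raw), &err);
    	if (err == 0)
    		printf("quoted literal: '%s'\n", escaped);
    }
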
if (strcmp(name, "client_encoding") == 0)
{
* If the query was not even sent, return NULL; conn->errorMessage is set to
* a relevant message.
* If the query was sent, a new PGresult is returned (which could indicate
- * either success or failure). On success, the PGresult contains status
+ * either success or failure). On success, the PGresult contains status
* PGRES_COMMAND_OK, and its parameter and column-heading fields describe
* the statement's inputs and outputs respectively.
* The user is responsible for freeing the PGresult via PQclear()
* Common code to send a Describe command
*
* Available options for desc_type are
- * 'S' to describe a prepared statement; or
- * 'P' to describe a portal.
+ * 'S' to describe a prepared statement; or
+ * 'P' to describe a portal.
* Returns 1 on success and 0 on failure.
*/
static int
/*
* Process any NOTICE or NOTIFY messages that might be pending in the
- * input buffer. Since the server might generate many notices during
- * the COPY, we want to clean those out reasonably promptly to prevent
- * indefinite expansion of the input buffer. (Note: the actual read
- * of input data into the input buffer happens down inside pqSendSome,
- * but it's not authorized to get rid of the data again.)
+ * input buffer. Since the server might generate many notices during the
+ * COPY, we want to clean those out reasonably promptly to prevent
+ * indefinite expansion of the input buffer. (Note: the actual read of
+ * input data into the input buffer happens down inside pqSendSome, but
+ * it's not authorized to get rid of the data again.)
*/
parseInput(conn);
param_num, res->numParameters - 1);
return FALSE;
}
-
+
return TRUE;
}
char *
PQcmdTuples(PGresult *res)
{
- char *p, *c;
+ char *p,
+ *c;
if (!res)
return "";
while (*p && *p != ' ')
p++;
if (*p == 0)
- goto interpret_error; /* no space? */
+ goto interpret_error; /* no space? */
p++;
}
else if (strncmp(res->cmdStatus, "DELETE ", 7) == 0 ||
goto interpret_error;
return p;
-
+
interpret_error:
pqInternalNotice(&res->noticeHooks,
"could not interpret result from server: %s",
}
/* PQnparams:
- * returns the number of input parameters of a prepared statement.
+ * returns the number of input parameters of a prepared statement.
*/
int
PQnparams(const PGresult *res)
}
/* PQparamtype:
- * returns type Oid of the specified statement parameter.
+ * returns type Oid of the specified statement parameter.
*/
Oid
PQparamtype(const PGresult *res, int param_num)
while (remaining > 0 && *source != '\0')
{
- char c = *source;
- int len;
- int i;
+ char c = *source;
+ int len;
+ int i;
/* Fast path for plain ASCII */
if (!IS_HIGHBIT_SET(c))
/*
* If we hit premature end of string (ie, incomplete multibyte
- * character), try to pad out to the correct length with spaces.
- * We may not be able to pad completely, but we will always be able
- * to insert at least one pad space (since we'd not have quoted a
+ * character), try to pad out to the correct length with spaces. We
+ * may not be able to pad completely, but we will always be able to
+ * insert at least one pad space (since we'd not have quoted a
* multibyte character). This should be enough to make a string that
* the server will error out on.
*/
*error = 1;
if (conn)
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("incomplete multibyte character\n"));
+ libpq_gettext("incomplete multibyte character\n"));
for (; i < len; i++)
{
if (((size_t) (target - to)) / 2 >= length)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.59 2006/09/07 15:37:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.60 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (tmp != nbytes)
{
/*
- * If lo_write() failed, we are now in an aborted transaction
- * so there's no need for lo_close(); furthermore, if we tried
- * it we'd overwrite the useful error result with a useless one.
- * So just nail the doors shut and get out of town.
+ * If lo_write() failed, we are now in an aborted transaction so
+ * there's no need for lo_close(); furthermore, if we tried it
+ * we'd overwrite the useful error result with a useless one. So
+ * just nail the doors shut and get out of town.
*/
(void) close(fd);
return InvalidOid;
if (nbytes < 0)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("could not read from file \"%s\": %s\n"),
+ libpq_gettext("could not read from file \"%s\": %s\n"),
filename, pqStrerror(errno, sebuf, sizeof(sebuf)));
lobjOid = InvalidOid;
}
}
/*
- * If lo_read() failed, we are now in an aborted transaction
- * so there's no need for lo_close(); furthermore, if we tried
- * it we'd overwrite the useful error result with a useless one.
- * So skip lo_close() if we got a failure result.
+ * If lo_read() failed, we are now in an aborted transaction so there's no
+ * need for lo_close(); furthermore, if we tried it we'd overwrite the
+ * useful error result with a useless one. So skip lo_close() if we got a
+ * failure result.
*/
if (nbytes < 0 ||
lo_close(conn, lobj) != 0)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.129 2006/07/14 05:28:29 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.130 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int pqPutMsgBytes(const void *buf, size_t len, PGconn *conn);
static int pqSendSome(PGconn *conn, int len);
-static int pqSocketCheck(PGconn *conn, int forRead, int forWrite,
+static int pqSocketCheck(PGconn *conn, int forRead, int forWrite,
time_t end_time);
static int pqSocketPoll(int sock, int forRead, int forWrite, time_t end_time);
* didn't really belong there.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.72 2006/07/14 16:59:19 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.73 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
if (po->caption)
fprintf(fout,
-				"<table %s><caption align=\"top\">%s</caption>\n",
+				"<table %s><caption align=\"top\">%s</caption>\n",
po->tableOpt ? po->tableOpt : "",
po->caption);
else
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.28 2006/10/01 22:25:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.29 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int getCopyStart(PGconn *conn, ExecStatusType copytype);
static int getReadyForQuery(PGconn *conn);
static void reportErrorPosition(PQExpBuffer msg, const char *query,
- int loc, int encoding);
+ int loc, int encoding);
static int build_startup_packet(const PGconn *conn, char *packet,
const PQEnvironmentOption *options);
/* First 'T' in a query sequence */
if (getRowDescriptions(conn))
return;
+
/*
- * If we're doing a Describe, we're ready to pass
- * the result back to the client.
+ * If we're doing a Describe, we're ready to pass the
+ * result back to the client.
*/
if (conn->queryclass == PGQUERY_DESCRIBE)
conn->asyncStatus = PGASYNC_READY;
if (conn->result == NULL)
conn->result = PQmakeEmptyPGresult(conn,
PGRES_COMMAND_OK);
+
/*
- * If we're doing a Describe, we're ready to pass
- * the result back to the client.
+ * If we're doing a Describe, we're ready to pass the
+ * result back to the client.
*/
if (conn->queryclass == PGQUERY_DESCRIBE)
conn->asyncStatus = PGASYNC_READY;
int i;
/*
- * When doing Describe for a prepared statement, there'll already be
- * a PGresult created by getParamDescriptions, and we should fill
- * data into that. Otherwise, create a new, empty PGresult.
+ * When doing Describe for a prepared statement, there'll already be a
+ * PGresult created by getParamDescriptions, and we should fill data into
+ * that. Otherwise, create a new, empty PGresult.
*/
if (conn->queryclass == PGQUERY_DESCRIBE)
{
return 0;
failure:
+
/*
* Discard incomplete result, unless it's from getParamDescriptions.
*
* Note that if we hit a bufferload boundary while handling the
* describe-statement case, we'll forget any PGresult space we just
- * allocated, and then reallocate it on next try. This will bloat
- * the PGresult a little bit but the space will be freed at PQclear,
- * so it doesn't seem worth trying to be smarter.
+ * allocated, and then reallocate it on next try. This will bloat the
+ * PGresult a little bit but the space will be freed at PQclear, so it
+ * doesn't seem worth trying to be smarter.
*/
if (result != conn->result)
PQclear(result);
static int
getParamDescriptions(PGconn *conn)
{
- PGresult *result;
- int nparams;
- int i;
-
+ PGresult *result;
+ int nparams;
+ int i;
+
result = PQmakeEmptyPGresult(conn, PGRES_COMMAND_OK);
if (!result)
goto failure;
/* get parameter info */
for (i = 0; i < nparams; i++)
{
- int typid;
-
+ int typid;
+
if (pqGetInt(&typid, 4, conn))
goto failure;
result->paramDescs[i].typid = typid;
/*
* Each character might occupy multiple physical bytes in the string, and
* in some Far Eastern character sets it might take more than one screen
- * column as well. We compute the starting byte offset and starting
+ * column as well. We compute the starting byte offset and starting
* screen column of each logical character, and store these in qidx[] and
* scridx[] respectively.
*/
mb_encoding = (pg_encoding_max_length(encoding) != 1);
/*
- * Within the scanning loop, cno is the current character's logical number,
- * qoffset is its offset in wquery, and scroffset is its starting logical
- * screen column (all indexed from 0). "loc" is the logical character
- * number of the error location. We scan to determine loc_line (the
- * 1-based line number containing loc) and ibeg/iend (first character
- * number and last+1 character number of the line containing loc).
- * Note that qidx[] and scridx[] are filled only as far as iend.
+ * Within the scanning loop, cno is the current character's logical
+ * number, qoffset is its offset in wquery, and scroffset is its starting
+ * logical screen column (all indexed from 0). "loc" is the logical
+ * character number of the error location. We scan to determine loc_line
+ * (the 1-based line number containing loc) and ibeg/iend (first character
+ * number and last+1 character number of the line containing loc). Note
+ * that qidx[] and scridx[] are filled only as far as iend.
*/
qoffset = 0;
scroffset = 0;
for (cno = 0; wquery[qoffset] != '\0'; cno++)
{
- char ch = wquery[qoffset];
+ char ch = wquery[qoffset];
qidx[cno] = qoffset;
scridx[cno] = scroffset;
wquery[qoffset] = ' ';
/*
- * If end-of-line, count lines and mark positions. Each \r or \n counts
- * as a line except when \r \n appear together.
+ * If end-of-line, count lines and mark positions. Each \r or \n
+ * counts as a line except when \r \n appear together.
*/
else if (ch == '\r' || ch == '\n')
{
/* Advance */
if (mb_encoding)
{
- int w;
+ int w;
w = pg_encoding_dsplen(encoding, &wquery[qoffset]);
/* treat any non-tab control chars as width 1 */
scroffset = 0;
for (; i < msg->len; i += pg_encoding_mblen(encoding, &msg->data[i]))
{
- int w = pg_encoding_dsplen(encoding, &msg->data[i]);
+ int w = pg_encoding_dsplen(encoding, &msg->data[i]);
if (w <= 0)
w = 1;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.87 2006/09/27 15:41:24 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.88 2006/10/04 00:30:13 momjian Exp $
*
* NOTES
* [ Most of these notes are wrong/obsolete, but perhaps not all ]
if (stat(fnbuf, &buf) == 0)
{
X509_STORE *cvstore;
-
+
if (!SSL_CTX_load_verify_locations(SSL_context, fnbuf, NULL))
{
char *err = SSLerrmessage();
if (X509_STORE_load_locations(cvstore, ROOT_CRL_FILE, NULL) != 0)
/* OpenSSL 0.96 does not support X509_V_FLAG_CRL_CHECK */
#ifdef X509_V_FLAG_CRL_CHECK
- X509_STORE_set_flags(cvstore,
- X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
+ X509_STORE_set_flags(cvstore,
+ X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
/* if not found, silently ignore; we do not require CRL */
#else
{
char *err = SSLerrmessage();
-
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("Installed SSL library does not support CRL certificates, file \"%s\"\n"),
fnbuf);
}
#endif
}
-
+
SSL_CTX_set_verify(SSL_context, SSL_VERIFY_PEER, verify_cb);
}
}
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.133 2006/09/07 15:37:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.134 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Oid PQparamtype(const PGresult *res, int param_num);
/* Describe prepared statements and portals */
-extern PGresult *PQdescribePrepared(PGconn *conn, const char *stmt);
-extern PGresult *PQdescribePortal(PGconn *conn, const char *portal);
+extern PGresult *PQdescribePrepared(PGconn *conn, const char *stmt);
+extern PGresult *PQdescribePortal(PGconn *conn, const char *portal);
extern int PQsendDescribePrepared(PGconn *conn, const char *stmt);
extern int PQsendDescribePortal(PGconn *conn, const char *portal);
/* Quoting strings before inclusion in queries. */
extern size_t PQescapeStringConn(PGconn *conn,
- char *to, const char *from, size_t length,
- int *error);
+ char *to, const char *from, size_t length,
+ int *error);
extern unsigned char *PQescapeByteaConn(PGconn *conn,
const unsigned char *from, size_t from_length,
size_t *to_length);
extern unsigned char *PQunescapeBytea(const unsigned char *strtext,
size_t *retbuflen);
+
/* These forms are deprecated! */
extern size_t PQescapeString(char *to, const char *from, size_t length);
extern unsigned char *PQescapeBytea(const unsigned char *from, size_t from_length,
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.115 2006/08/18 19:52:39 tgl Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.116 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Status indicators */
ConnStatusType status;
PGAsyncStatusType asyncStatus;
- PGTransactionStatusType xactStatus; /* never changes to ACTIVE */
+ PGTransactionStatusType xactStatus; /* never changes to ACTIVE */
PGQueryClass queryclass;
char *last_query; /* last SQL command, or NULL if unknown */
bool options_valid; /* true if OK to attempt connection */
#include "win32.h"
-/* Declared here to avoid pulling in all includes, which causes name collisions */
+/* Declared here to avoid pulling in all includes, which causes name collisions */
#ifdef ENABLE_NLS
extern char *
libpq_gettext(const char *msgid)
/**********************************************************************
* plperl.c - perl as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.118 2006/08/27 23:47:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.119 2006/10/04 00:30:13 momjian Exp $
*
**********************************************************************/
typedef struct plperl_call_data
{
plperl_proc_desc *prodesc;
- FunctionCallInfo fcinfo;
- Tuplestorestate *tuple_store;
- TupleDesc ret_tdesc;
- AttInMetadata *attinmeta;
- MemoryContext tmp_cxt;
+ FunctionCallInfo fcinfo;
+ Tuplestorestate *tuple_store;
+ TupleDesc ret_tdesc;
+ AttInMetadata *attinmeta;
+ MemoryContext tmp_cxt;
} plperl_call_data;
/**********************************************************************
#ifdef WIN32
- /*
+ /*
* The perl library on startup does horrible things like call
- * setlocale(LC_ALL,""). We have protected against that on most
- * platforms by setting the environment appropriately. However, on
- * Windows, setlocale() does not consult the environment, so we need
- * to save the existing locale settings before perl has a chance to
- * mangle them and restore them after its dirty deeds are done.
+ * setlocale(LC_ALL,""). We have protected against that on most platforms
+ * by setting the environment appropriately. However, on Windows,
+ * setlocale() does not consult the environment, so we need to save the
+ * existing locale settings before perl has a chance to mangle them and
+ * restore them after its dirty deeds are done.
*
* MSDN ref:
* http://msdn.microsoft.com/library/en-us/vclib/html/_crt_locale.asp
* subsequent calls to the interpreter don't mess with the locale
* settings.
*
- * We restore them using Perl's POSIX::setlocale() function so that
- * Perl doesn't have a different idea of the locale from Postgres.
+ * We restore them using Perl's POSIX::setlocale() function so that Perl
+ * doesn't have a different idea of the locale from Postgres.
*
*/
- char *loc;
- char *save_collate, *save_ctype, *save_monetary, *save_numeric, *save_time;
- char buf[1024];
+ char *loc;
+ char *save_collate,
+ *save_ctype,
+ *save_monetary,
+ *save_numeric,
+ *save_time;
+ char buf[1024];
- loc = setlocale(LC_COLLATE,NULL);
+ loc = setlocale(LC_COLLATE, NULL);
save_collate = loc ? pstrdup(loc) : NULL;
- loc = setlocale(LC_CTYPE,NULL);
+ loc = setlocale(LC_CTYPE, NULL);
save_ctype = loc ? pstrdup(loc) : NULL;
- loc = setlocale(LC_MONETARY,NULL);
+ loc = setlocale(LC_MONETARY, NULL);
save_monetary = loc ? pstrdup(loc) : NULL;
- loc = setlocale(LC_NUMERIC,NULL);
+ loc = setlocale(LC_NUMERIC, NULL);
save_numeric = loc ? pstrdup(loc) : NULL;
- loc = setlocale(LC_TIME,NULL);
+ loc = setlocale(LC_TIME, NULL);
save_time = loc ? pstrdup(loc) : NULL;
-
#endif
plperl_interp = perl_alloc();
#ifdef WIN32
- eval_pv("use POSIX qw(locale_h);", TRUE); /* croak on failure */
+ eval_pv("use POSIX qw(locale_h);", TRUE); /* croak on failure */
if (save_collate != NULL)
{
- snprintf(buf, sizeof(buf),"setlocale(%s,'%s');",
- "LC_COLLATE",save_collate);
- eval_pv(buf,TRUE);
+ snprintf(buf, sizeof(buf), "setlocale(%s,'%s');",
+ "LC_COLLATE", save_collate);
+ eval_pv(buf, TRUE);
pfree(save_collate);
}
if (save_ctype != NULL)
{
- snprintf(buf, sizeof(buf),"setlocale(%s,'%s');",
- "LC_CTYPE",save_ctype);
- eval_pv(buf,TRUE);
+ snprintf(buf, sizeof(buf), "setlocale(%s,'%s');",
+ "LC_CTYPE", save_ctype);
+ eval_pv(buf, TRUE);
pfree(save_ctype);
}
if (save_monetary != NULL)
{
- snprintf(buf, sizeof(buf),"setlocale(%s,'%s');",
- "LC_MONETARY",save_monetary);
- eval_pv(buf,TRUE);
+ snprintf(buf, sizeof(buf), "setlocale(%s,'%s');",
+ "LC_MONETARY", save_monetary);
+ eval_pv(buf, TRUE);
pfree(save_monetary);
}
if (save_numeric != NULL)
{
- snprintf(buf, sizeof(buf),"setlocale(%s,'%s');",
- "LC_NUMERIC",save_numeric);
- eval_pv(buf,TRUE);
+ snprintf(buf, sizeof(buf), "setlocale(%s,'%s');",
+ "LC_NUMERIC", save_numeric);
+ eval_pv(buf, TRUE);
pfree(save_numeric);
}
if (save_time != NULL)
{
- snprintf(buf, sizeof(buf),"setlocale(%s,'%s');",
- "LC_TIME",save_time);
- eval_pv(buf,TRUE);
+ snprintf(buf, sizeof(buf), "setlocale(%s,'%s');",
+ "LC_TIME", save_time);
+ eval_pv(buf, TRUE);
pfree(save_time);
}
-
#endif
}
SV *array_ret = NULL;
/*
- * Create the call_data before connecting to SPI, so that it is
- * not allocated in the SPI memory context
+ * Create the call_data before connecting to SPI, so that it is not
+ * allocated in the SPI memory context
*/
current_call_data = (plperl_call_data *) palloc0(sizeof(plperl_call_data));
current_call_data->fcinfo = fcinfo;
HV *hvTD;
/*
- * Create the call_data before connecting to SPI, so that it is
- * not allocated in the SPI memory context
+ * Create the call_data before connecting to SPI, so that it is not
+ * allocated in the SPI memory context
*/
current_call_data = (plperl_call_data *) palloc0(sizeof(plperl_call_data));
current_call_data->fcinfo = fcinfo;
{
bool uptodate;
- prodesc = INT2PTR( plperl_proc_desc *, SvUV(*svp));
+ prodesc = INT2PTR(plperl_proc_desc *, SvUV(*svp));
/************************************************************
* If it's present, must check whether it's still up to date.
}
hv_store(plperl_proc_hash, internal_proname, proname_len,
- newSVuv( PTR2UV( prodesc)), 0);
+ newSVuv(PTR2UV(prodesc)), 0);
}
ReleaseSysCache(procTup);
if (!current_call_data->ret_tdesc)
{
- TupleDesc tupdesc;
+ TupleDesc tupdesc;
Assert(!current_call_data->tuple_store);
Assert(!current_call_data->attinmeta);
/*
- * This is the first call to return_next in the current
- * PL/Perl function call, so memoize some lookups
+ * This is the first call to return_next in the current PL/Perl
+ * function call, so memoize some lookups
*/
if (prodesc->fn_retistuple)
(void) get_call_result_type(fcinfo, NULL, &tupdesc);
}
MemoryContextSwitchTo(old_cxt);
- }
+ }
/*
* Producing the tuple we want to return requires making plenty of
- * palloc() allocations that are not cleaned up. Since this
- * function can be called many times before the current memory
- * context is reset, we need to do those allocations in a
- * temporary context.
+ * palloc() allocations that are not cleaned up. Since this function can
+ * be called many times before the current memory context is reset, we
+ * need to do those allocations in a temporary context.
*/
if (!current_call_data->tmp_cxt)
{
/* Create a cursor for the query */
plan = SPI_prepare(query, 0, NULL);
- if ( plan == NULL)
+ if (plan == NULL)
elog(ERROR, "SPI_prepare() failed:%s",
- SPI_result_code_string(SPI_result));
+ SPI_result_code_string(SPI_result));
portal = SPI_cursor_open(NULL, plan, NULL, NULL, false);
- SPI_freeplan( plan);
- if ( portal == NULL)
+ SPI_freeplan(plan);
+ if (portal == NULL)
elog(ERROR, "SPI_cursor_open() failed:%s",
- SPI_result_code_string(SPI_result));
+ SPI_result_code_string(SPI_result));
cursor = newSVpv(portal->name, 0);
/* Commit the inner transaction, return to outer xact context */
void
plperl_spi_cursor_close(char *cursor)
{
- Portal p = SPI_cursor_find(cursor);
+ Portal p = SPI_cursor_find(cursor);
+
if (p)
SPI_cursor_close(p);
}
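The portal name handed back by the cursor-opening path is all a later call needs to resume fetching; a minimal consumption sketch on the C side, assuming cursor_name is the string previously returned to the caller (error handling and the host-language row conversion are elided):

	Portal		portal = SPI_cursor_find(cursor_name);

	if (portal == NULL)
		elog(ERROR, "cursor \"%s\" does not exist", cursor_name);

	/* fetch forward one row at a time until SPI_processed drops to 0 */
	SPI_cursor_fetch(portal, true, 1);
	while (SPI_processed == 1)
	{
		HeapTuple	tuple = SPI_tuptable->vals[0];
		TupleDesc	tupdesc = SPI_tuptable->tupdesc;

		/* ... convert tuple/tupdesc to the host language's row format ... */
		SPI_freetuptable(SPI_tuptable);
		SPI_cursor_fetch(portal, true, 1);
	}
	SPI_cursor_close(portal);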
SV *
-plperl_spi_prepare(char* query, int argc, SV ** argv)
+plperl_spi_prepare(char *query, int argc, SV **argv)
{
plperl_query_desc *qdesc;
void *plan;
************************************************************/
qdesc = (plperl_query_desc *) malloc(sizeof(plperl_query_desc));
MemSet(qdesc, 0, sizeof(plperl_query_desc));
- snprintf(qdesc-> qname, sizeof(qdesc-> qname), "%lx", (long) qdesc);
- qdesc-> nargs = argc;
- qdesc-> argtypes = (Oid *) malloc(argc * sizeof(Oid));
- qdesc-> arginfuncs = (FmgrInfo *) malloc(argc * sizeof(FmgrInfo));
- qdesc-> argtypioparams = (Oid *) malloc(argc * sizeof(Oid));
+ snprintf(qdesc->qname, sizeof(qdesc->qname), "%lx", (long) qdesc);
+ qdesc->nargs = argc;
+ qdesc->argtypes = (Oid *) malloc(argc * sizeof(Oid));
+ qdesc->arginfuncs = (FmgrInfo *) malloc(argc * sizeof(FmgrInfo));
+ qdesc->argtypioparams = (Oid *) malloc(argc * sizeof(Oid));
PG_TRY();
{
if (plan == NULL)
elog(ERROR, "SPI_prepare() failed:%s",
- SPI_result_code_string(SPI_result));
+ SPI_result_code_string(SPI_result));
/************************************************************
* Save the plan into permanent memory (right now it's in the
************************************************************/
qdesc->plan = SPI_saveplan(plan);
if (qdesc->plan == NULL)
- elog(ERROR, "SPI_saveplan() failed: %s",
- SPI_result_code_string(SPI_result));
+ elog(ERROR, "SPI_saveplan() failed: %s",
+ SPI_result_code_string(SPI_result));
/* Release the procCxt copy to avoid within-function memory leak */
SPI_freeplan(plan);
ReleaseCurrentSubTransaction();
MemoryContextSwitchTo(oldcontext);
CurrentResourceOwner = oldowner;
+
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just
+ * in case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
PG_CATCH();
{
ErrorData *edata;
-
- free(qdesc-> argtypes);
- free(qdesc-> arginfuncs);
- free(qdesc-> argtypioparams);
+
+ free(qdesc->argtypes);
+ free(qdesc->arginfuncs);
+ free(qdesc->argtypioparams);
free(qdesc);
/* Save error info */
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return
+ * to connected state.
*/
SPI_restore_connection();
* Insert a hashtable entry for the plan and return
* the key to the caller.
************************************************************/
- hv_store( plperl_query_hash, qdesc->qname, strlen(qdesc->qname), newSVuv( PTR2UV( qdesc)), 0);
+ hv_store(plperl_query_hash, qdesc->qname, strlen(qdesc->qname), newSVuv(PTR2UV(qdesc)), 0);
- return newSVpv( qdesc->qname, strlen(qdesc->qname));
-}
+ return newSVpv(qdesc->qname, strlen(qdesc->qname));
+}
HV *
-plperl_spi_exec_prepared(char* query, HV * attr, int argc, SV ** argv)
+plperl_spi_exec_prepared(char *query, HV *attr, int argc, SV **argv)
{
HV *ret_hv;
- SV **sv;
- int i, limit, spi_rv;
- char * nulls;
+ SV **sv;
+ int i,
+ limit,
+ spi_rv;
+ char *nulls;
Datum *argvalues;
plperl_query_desc *qdesc;
/*
- * Execute the query inside a sub-transaction, so we can cope with
- * errors sanely
+ * Execute the query inside a sub-transaction, so we can cope with errors
+ * sanely
*/
MemoryContext oldcontext = CurrentMemoryContext;
ResourceOwner oldowner = CurrentResourceOwner;
* Fetch the saved plan descriptor, see if it's o.k.
************************************************************/
sv = hv_fetch(plperl_query_hash, query, strlen(query), 0);
- if ( sv == NULL)
+ if (sv == NULL)
elog(ERROR, "spi_exec_prepared: Invalid prepared query passed");
- if ( *sv == NULL || !SvOK( *sv))
+ if (*sv == NULL || !SvOK(*sv))
elog(ERROR, "spi_exec_prepared: panic - plperl_query_hash value corrupted");
- qdesc = INT2PTR( plperl_query_desc *, SvUV(*sv));
- if ( qdesc == NULL)
+ qdesc = INT2PTR(plperl_query_desc *, SvUV(*sv));
+ if (qdesc == NULL)
elog(ERROR, "spi_exec_prepared: panic - plperl_query_hash value vanished");
- if ( qdesc-> nargs != argc)
- elog(ERROR, "spi_exec_prepared: expected %d argument(s), %d passed",
- qdesc-> nargs, argc);
-
+ if (qdesc->nargs != argc)
+ elog(ERROR, "spi_exec_prepared: expected %d argument(s), %d passed",
+ qdesc->nargs, argc);
+
/************************************************************
* Parse eventual attributes
************************************************************/
limit = 0;
- if ( attr != NULL)
+ if (attr != NULL)
{
- sv = hv_fetch( attr, "limit", 5, 0);
- if ( *sv && SvIOK( *sv))
- limit = SvIV( *sv);
+ sv = hv_fetch(attr, "limit", 5, 0);
+ if (*sv && SvIOK(*sv))
+ limit = SvIV(*sv);
}
/************************************************************
* Set up arguments
************************************************************/
- if (argc > 0)
+ if (argc > 0)
{
nulls = (char *) palloc(argc);
argvalues = (Datum *) palloc(argc * sizeof(Datum));
- }
- else
+ }
+ else
{
nulls = NULL;
argvalues = NULL;
}
- for (i = 0; i < argc; i++)
+ for (i = 0; i < argc; i++)
{
- if (SvTYPE(argv[i]) != SVt_NULL)
+ if (SvTYPE(argv[i]) != SVt_NULL)
{
argvalues[i] = InputFunctionCall(&qdesc->arginfuncs[i],
SvPV(argv[i], PL_na),
qdesc->argtypioparams[i],
-1);
nulls[i] = ' ';
- }
- else
+ }
+ else
{
argvalues[i] = InputFunctionCall(&qdesc->arginfuncs[i],
NULL,
/************************************************************
* go
************************************************************/
- spi_rv = SPI_execute_plan(qdesc-> plan, argvalues, nulls,
+ spi_rv = SPI_execute_plan(qdesc->plan, argvalues, nulls,
current_call_data->prodesc->fn_readonly, limit);
ret_hv = plperl_spi_execute_fetch_result(SPI_tuptable, SPI_processed,
spi_rv);
- if ( argc > 0)
+ if (argc > 0)
{
- pfree( argvalues);
- pfree( nulls);
+ pfree(argvalues);
+ pfree(nulls);
}
/* Commit the inner transaction, return to outer xact context */
ReleaseCurrentSubTransaction();
MemoryContextSwitchTo(oldcontext);
CurrentResourceOwner = oldowner;
+
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just
+ * in case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return
+ * to connected state.
*/
SPI_restore_connection();
}
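All of the prepared-statement entry points above share the same error-containment shape: run the SPI calls inside an internal subtransaction, commit it on success, and roll it back in PG_CATCH before re-throwing. A condensed sketch of that shape, with do_spi_work() standing in as a placeholder for the actual SPI calls and croak() for the host-language error report:

	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;

	BeginInternalSubTransaction(NULL);
	/* Want to run inside the function's memory context, not the subxact's */
	MemoryContextSwitchTo(oldcontext);

	PG_TRY();
	{
		do_spi_work();			/* placeholder for SPI_execute_plan() etc. */

		/* Commit the inner transaction, return to outer xact context */
		ReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;
		SPI_restore_connection();
	}
	PG_CATCH();
	{
		ErrorData  *edata;

		/* Save error info in the caller's context, then clean up */
		MemoryContextSwitchTo(oldcontext);
		edata = CopyErrorData();
		FlushErrorState();

		/* Abort the inner transaction */
		RollbackAndReleaseCurrentSubTransaction();
		MemoryContextSwitchTo(oldcontext);
		CurrentResourceOwner = oldowner;
		SPI_restore_connection();

		/* Punt the error up to the caller */
		croak("%s", edata->message);
	}
	PG_END_TRY();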
SV *
-plperl_spi_query_prepared(char* query, int argc, SV ** argv)
+plperl_spi_query_prepared(char *query, int argc, SV **argv)
{
- SV **sv;
- int i;
- char * nulls;
+ SV **sv;
+ int i;
+ char *nulls;
Datum *argvalues;
plperl_query_desc *qdesc;
- SV *cursor;
- Portal portal = NULL;
+ SV *cursor;
+ Portal portal = NULL;
/*
- * Execute the query inside a sub-transaction, so we can cope with
- * errors sanely
+ * Execute the query inside a sub-transaction, so we can cope with errors
+ * sanely
*/
MemoryContext oldcontext = CurrentMemoryContext;
ResourceOwner oldowner = CurrentResourceOwner;
* Fetch the saved plan descriptor, see if it's o.k.
************************************************************/
sv = hv_fetch(plperl_query_hash, query, strlen(query), 0);
- if ( sv == NULL)
+ if (sv == NULL)
elog(ERROR, "spi_query_prepared: Invalid prepared query passed");
- if ( *sv == NULL || !SvOK( *sv))
+ if (*sv == NULL || !SvOK(*sv))
elog(ERROR, "spi_query_prepared: panic - plperl_query_hash value corrupted");
- qdesc = INT2PTR( plperl_query_desc *, SvUV(*sv));
- if ( qdesc == NULL)
+ qdesc = INT2PTR(plperl_query_desc *, SvUV(*sv));
+ if (qdesc == NULL)
elog(ERROR, "spi_query_prepared: panic - plperl_query_hash value vanished");
- if ( qdesc-> nargs != argc)
- elog(ERROR, "spi_query_prepared: expected %d argument(s), %d passed",
- qdesc-> nargs, argc);
-
+ if (qdesc->nargs != argc)
+ elog(ERROR, "spi_query_prepared: expected %d argument(s), %d passed",
+ qdesc->nargs, argc);
+
/************************************************************
* Set up arguments
************************************************************/
- if (argc > 0)
+ if (argc > 0)
{
nulls = (char *) palloc(argc);
argvalues = (Datum *) palloc(argc * sizeof(Datum));
- }
- else
+ }
+ else
{
nulls = NULL;
argvalues = NULL;
}
- for (i = 0; i < argc; i++)
+ for (i = 0; i < argc; i++)
{
- if (SvTYPE(argv[i]) != SVt_NULL)
+ if (SvTYPE(argv[i]) != SVt_NULL)
{
argvalues[i] = InputFunctionCall(&qdesc->arginfuncs[i],
SvPV(argv[i], PL_na),
qdesc->argtypioparams[i],
-1);
nulls[i] = ' ';
- }
- else
+ }
+ else
{
argvalues[i] = InputFunctionCall(&qdesc->arginfuncs[i],
NULL,
/************************************************************
* go
************************************************************/
- portal = SPI_cursor_open(NULL, qdesc-> plan, argvalues, nulls,
- current_call_data->prodesc->fn_readonly);
- if ( argc > 0)
+ portal = SPI_cursor_open(NULL, qdesc->plan, argvalues, nulls,
+ current_call_data->prodesc->fn_readonly);
+ if (argc > 0)
{
- pfree( argvalues);
- pfree( nulls);
+ pfree(argvalues);
+ pfree(nulls);
}
- if ( portal == NULL)
+ if (portal == NULL)
elog(ERROR, "SPI_cursor_open() failed:%s",
- SPI_result_code_string(SPI_result));
+ SPI_result_code_string(SPI_result));
cursor = newSVpv(portal->name, 0);
ReleaseCurrentSubTransaction();
MemoryContextSwitchTo(oldcontext);
CurrentResourceOwner = oldowner;
+
/*
- * AtEOSubXact_SPI() should not have popped any SPI context,
- * but just in case it did, make sure we remain connected.
+ * AtEOSubXact_SPI() should not have popped any SPI context, but just
+ * in case it did, make sure we remain connected.
*/
SPI_restore_connection();
}
CurrentResourceOwner = oldowner;
/*
- * If AtEOSubXact_SPI() popped any SPI context of the subxact,
- * it will have left us in a disconnected state. We need this
- * hack to return to connected state.
+ * If AtEOSubXact_SPI() popped any SPI context of the subxact, it will
+ * have left us in a disconnected state. We need this hack to return
+ * to connected state.
*/
SPI_restore_connection();
void
plperl_spi_freeplan(char *query)
{
- SV ** sv;
- void * plan;
+ SV **sv;
+ void *plan;
plperl_query_desc *qdesc;
sv = hv_fetch(plperl_query_hash, query, strlen(query), 0);
- if ( sv == NULL)
+ if (sv == NULL)
elog(ERROR, "spi_exec_freeplan: Invalid prepared query passed");
- if ( *sv == NULL || !SvOK( *sv))
+ if (*sv == NULL || !SvOK(*sv))
elog(ERROR, "spi_exec_freeplan: panic - plperl_query_hash value corrupted");
- qdesc = INT2PTR( plperl_query_desc *, SvUV(*sv));
- if ( qdesc == NULL)
+ qdesc = INT2PTR(plperl_query_desc *, SvUV(*sv));
+ if (qdesc == NULL)
elog(ERROR, "spi_exec_freeplan: panic - plperl_query_hash value vanished");
/*
- * free all memory before SPI_freeplan, so if it dies, nothing will be left over
- */
+ * free all memory before SPI_freeplan, so if it dies, nothing will be
+ * left over
+ */
hv_delete(plperl_query_hash, query, strlen(query), G_DISCARD);
- plan = qdesc-> plan;
- free(qdesc-> argtypes);
- free(qdesc-> arginfuncs);
- free(qdesc-> argtypioparams);
+ plan = qdesc->plan;
+ free(qdesc->argtypes);
+ free(qdesc->arginfuncs);
+ free(qdesc->argtypioparams);
free(qdesc);
- SPI_freeplan( plan);
+ SPI_freeplan(plan);
}
/*-------------------------------------------------------------------------
*
* plperl.h
- * Common include file for PL/Perl files
+ * Common include file for PL/Perl files
*
* This should be included _AFTER_ postgres.h and system include files
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1995, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.h,v 1.4 2006/03/05 16:40:51 adunstan Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.h,v 1.5 2006/10/04 00:30:13 momjian Exp $
*/
#ifndef PL_PERL_H
/* stop perl headers from hijacking stdio and other stuff on Windows */
#ifdef WIN32
#define WIN32IO_IS_STDIO
-/*
+/*
* isnan is defined in both the perl and mingw headers. We don't use it,
* so this just clears up the compile warning.
*/
#ifdef isnan
#undef isnan
#endif
-#endif
+#endif
/* required for perl API */
#include "EXTERN.h"
void plperl_return_next(SV *);
SV *plperl_spi_query(char *);
SV *plperl_spi_fetchrow(char *);
-SV *plperl_spi_prepare(char *, int, SV **);
-HV *plperl_spi_exec_prepared(char *, HV *, int, SV **);
-SV *plperl_spi_query_prepared(char *, int, SV **);
-void plperl_spi_freeplan(char *);
-void plperl_spi_cursor_close(char *);
+SV *plperl_spi_prepare(char *, int, SV **);
+HV *plperl_spi_exec_prepared(char *, HV *, int, SV **);
+SV *plperl_spi_query_prepared(char *, int, SV **);
+void plperl_spi_freeplan(char *);
+void plperl_spi_cursor_close(char *);
-#endif /* PL_PERL_H */
+#endif /* PL_PERL_H */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.107 2006/07/11 17:26:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.108 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
true);
function->tg_table_name_varno = var->dno;
-
+
/* add variable tg_table_schema */
var = plpgsql_build_variable("tg_table_schema", 0,
plpgsql_build_datatype(NAMEOID, -1),
true);
function->tg_table_schema_varno = var->dno;
-
+
/* Add the variable tg_nargs */
var = plpgsql_build_variable("tg_nargs", 0,
plpgsql_build_datatype(INT4OID, -1),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.179 2006/09/22 21:39:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.180 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Let the instrumentation plugin peek at this function
*/
if (*plugin_ptr && (*plugin_ptr)->func_beg)
- ((*plugin_ptr)->func_beg)(&estate, func);
+ ((*plugin_ptr)->func_beg) (&estate, func);
/*
* Now call the toplevel block of statements
if (estate.retistuple)
{
/*
- * We have to check that the returned tuple actually matches
- * the expected result type. XXX would be better to cache the
- * tupdesc instead of repeating get_call_result_type()
+ * We have to check that the returned tuple actually matches the
+ * expected result type. XXX would be better to cache the tupdesc
+ * instead of repeating get_call_result_type()
*/
TupleDesc tupdesc;
errmsg("returned record type does not match expected record type")));
break;
case TYPEFUNC_RECORD:
+
/*
* Failed to determine actual type of RECORD. We could
- * raise an error here, but what this means in practice
- * is that the caller is expecting any old generic
- * rowtype, so we don't really need to be restrictive.
- * Pass back the generated result type, instead.
+ * raise an error here, but what this means in practice is
+ * that the caller is expecting any old generic rowtype,
+ * so we don't really need to be restrictive. Pass back
+ * the generated result type, instead.
*/
tupdesc = estate.rettupdesc;
if (tupdesc == NULL) /* shouldn't happen */
}
/*
- * Copy tuple to upper executor memory, as a tuple Datum.
- * Make sure it is labeled with the caller-supplied tuple type.
+ * Copy tuple to upper executor memory, as a tuple Datum. Make
+ * sure it is labeled with the caller-supplied tuple type.
*/
estate.retval =
PointerGetDatum(SPI_returntuple((HeapTuple) (estate.retval),
* Let the instrumentation plugin peek at this function
*/
if (*plugin_ptr && (*plugin_ptr)->func_end)
- ((*plugin_ptr)->func_end)(&estate, func);
+ ((*plugin_ptr)->func_end) (&estate, func);
/* Clean up any leftover temporary memory */
FreeExprContext(estate.eval_econtext);
var = (PLpgSQL_var *) (estate.datums[func->tg_table_schema_varno]);
var->value = DirectFunctionCall1(namein,
CStringGetDatum(
- get_namespace_name(
- RelationGetNamespace(
- trigdata->tg_relation))));
+ get_namespace_name(
+ RelationGetNamespace(
+ trigdata->tg_relation))));
var->isnull = false;
var->freeval = true;
* Let the instrumentation plugin peek at this function
*/
if (*plugin_ptr && (*plugin_ptr)->func_beg)
- ((*plugin_ptr)->func_beg)(&estate, func);
+ ((*plugin_ptr)->func_beg) (&estate, func);
/*
* Now call the toplevel block of statements
* Let the instrumentation plugin peek at this function
*/
if (*plugin_ptr && (*plugin_ptr)->func_end)
- ((*plugin_ptr)->func_end)(&estate, func);
+ ((*plugin_ptr)->func_end) (&estate, func);
/* Clean up any leftover temporary memory */
FreeExprContext(estate.eval_econtext);
/* Let the plugin know that we are about to execute this statement */
if (*plugin_ptr && (*plugin_ptr)->stmt_beg)
- ((*plugin_ptr)->stmt_beg)(estate, stmt);
+ ((*plugin_ptr)->stmt_beg) (estate, stmt);
CHECK_FOR_INTERRUPTS();
/* Let the plugin know that we have finished executing this statement */
if (*plugin_ptr && (*plugin_ptr)->stmt_end)
- ((*plugin_ptr)->stmt_end)(estate, stmt);
+ ((*plugin_ptr)->stmt_end) (estate, stmt);
estate->err_stmt = save_estmt;
exec_eval_cleanup(estate);
/*
- * Get the by value
+ * Get the by value
*/
by_value = exec_eval_expr(estate, stmt->by, &isnull, &valtype);
by_value = exec_cast_value(by_value, valtype, var->datatype->typoid,
estate->eval_econtext = CreateExprContext(simple_eval_estate);
/*
- * Let the plugin see this function before we initialize any
- * local PL/pgSQL variables - note that we also give the plugin
- * a few function pointers so it can call back into PL/pgSQL
- * for doing things like variable assignments and stack traces
+ * Let the plugin see this function before we initialize any local
+ * PL/pgSQL variables - note that we also give the plugin a few function
+ * pointers so it can call back into PL/pgSQL for doing things like
+ * variable assignments and stack traces
*/
if (*plugin_ptr)
{
(*plugin_ptr)->assign_expr = exec_assign_expr;
if ((*plugin_ptr)->func_setup)
- ((*plugin_ptr)->func_setup)(estate, func);
+ ((*plugin_ptr)->func_setup) (estate, func);
}
}
PLpgSQL_expr *expr = stmt->sqlstmt;
/*
- * On the first call for this statement generate the plan, and
- * detect whether the statement is INSERT/UPDATE/DELETE
+ * On the first call for this statement generate the plan, and detect
+ * whether the statement is INSERT/UPDATE/DELETE
*/
if (expr->plan == NULL)
{
foreach(l2, (List *) lfirst(l))
{
- Query *q = (Query *) lfirst(l2);
+ Query *q = (Query *) lfirst(l2);
Assert(IsA(q, Query));
if (q->canSetTag)
}
/*
- * If we have INTO, then we only need one row back ... but if we have
- * INTO STRICT, ask for two rows, so that we can verify the statement
- * returns only one. INSERT/UPDATE/DELETE are always treated strictly.
- * Without INTO, just run the statement to completion (tcount = 0).
+ * If we have INTO, then we only need one row back ... but if we have INTO
+ * STRICT, ask for two rows, so that we can verify the statement returns
+ * only one. INSERT/UPDATE/DELETE are always treated strictly. Without
+ * INTO, just run the statement to completion (tcount = 0).
*
- * We could just ask for two rows always when using INTO, but there
- * are some cases where demanding the extra row costs significant time,
- * eg by forcing completion of a sequential scan. So don't do it unless
- * we need to enforce strictness.
+ * We could just ask for two rows always when using INTO, but there are
+ * some cases where demanding the extra row costs significant time, eg by
+ * forcing completion of a sequential scan. So don't do it unless we need
+ * to enforce strictness.
*/
if (stmt->into)
{
/*
* Check for error, and set FOUND if appropriate (for historical reasons
- * we set FOUND only for certain query types). Also Assert that we
+ * we set FOUND only for certain query types). Also Assert that we
* identified the statement type the same as SPI did.
*/
switch (rc)
if (tuptab == NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INTO used with a command that cannot return data")));
+ errmsg("INTO used with a command that cannot return data")));
/* Determine if we assign to a record or a row */
if (stmt->rec != NULL)
elog(ERROR, "unsupported target");
/*
- * If SELECT ... INTO specified STRICT, and the query didn't
- * find exactly one row, throw an error. If STRICT was not specified,
- * then allow the query to find any number of rows.
+ * If SELECT ... INTO specified STRICT, and the query didn't find
+ * exactly one row, throw an error. If STRICT was not specified, then
+ * allow the query to find any number of rows.
*/
if (n == 0)
{
int exec_res;
/*
- * First we evaluate the string expression after the EXECUTE keyword.
- * Its result is the querystring we have to execute.
+ * First we evaluate the string expression after the EXECUTE keyword. Its
+ * result is the querystring we have to execute.
*/
query = exec_eval_expr(estate, stmt->query, &isnull, &restype);
if (isnull)
if (tuptab == NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("INTO used with a command that cannot return data")));
+ errmsg("INTO used with a command that cannot return data")));
/* Determine if we assign to a record or a row */
if (stmt->rec != NULL)
elog(ERROR, "unsupported target");
/*
- * If SELECT ... INTO specified STRICT, and the query didn't
- * find exactly one row, throw an error. If STRICT was not specified,
- * then allow the query to find any number of rows.
+ * If SELECT ... INTO specified STRICT, and the query didn't find
+ * exactly one row, throw an error. If STRICT was not specified, then
+ * allow the query to find any number of rows.
*/
if (n == 0)
{
paramLI = (ParamListInfo)
MemoryContextAlloc(econtext->ecxt_per_tuple_memory,
sizeof(ParamListInfoData) +
- (expr->nparams - 1) * sizeof(ParamExternData));
+ (expr->nparams - 1) *sizeof(ParamExternData));
paramLI->numParams = expr->nparams;
for (i = 0; i < expr->nparams; i++)
case T_RowCompareExpr:
{
- RowCompareExpr *expr = (RowCompareExpr *) node;
+ RowCompareExpr *expr = (RowCompareExpr *) node;
if (!exec_simple_check_node((Node *) expr->largs))
return FALSE;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.31 2006/08/15 19:01:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_handler.c,v 1.32 2006/10/04 00:30:13 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("plpgsql functions cannot take type %s",
- format_type_be(argtypes[i]))));
+ format_type_be(argtypes[i]))));
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.80 2006/08/15 19:01:17 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.81 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* fields for "simple expression" fast-path execution: */
Expr *expr_simple_expr; /* NULL means not a simple expr */
Oid expr_simple_type;
+
/*
* if expr is simple AND in use in current xact, expr_simple_state is
* valid. Test validity by seeing if expr_simple_xid matches current XID.
int tg_op_varno;
int tg_relid_varno;
int tg_relname_varno;
- int tg_table_name_varno;
- int tg_table_schema_varno;
+ int tg_table_name_varno;
+ int tg_table_schema_varno;
int tg_nargs_varno;
int ndatums;
* We expect that a plugin would do this at library load time (_PG_init()).
* It must also be careful to set the rendezvous variable back to NULL
* if it is unloaded (_PG_fini()).
- *
+ *
* This structure is basically a collection of function pointers --- at
* various interesting points in pl_exec.c, we call these functions
* (if the pointers are non-NULL) to give the plugin a chance to watch
* what we are doing.
*
- * func_setup is called when we start a function, before we've initialized
- * the local variables defined by the function.
+ * func_setup is called when we start a function, before we've initialized
+ * the local variables defined by the function.
*
- * func_beg is called when we start a function, after we've initialized
- * the local variables.
+ * func_beg is called when we start a function, after we've initialized
+ * the local variables.
*
- * func_end is called at the end of a function.
+ * func_end is called at the end of a function.
*
- * stmt_beg and stmt_end are called before and after (respectively) each
- * statement.
+ * stmt_beg and stmt_end are called before and after (respectively) each
+ * statement.
*
* Also, immediately before any call to func_setup, PL/pgSQL fills in the
* error_callback and assign_expr fields with pointers to its own
- * plpgsql_exec_error_callback and exec_assign_expr functions. This is
+ * plpgsql_exec_error_callback and exec_assign_expr functions. This is
* a somewhat ad-hoc expedient to simplify life for debugger plugins.
*/
typedef struct
{
/* Function pointers set up by the plugin */
- void (*func_setup) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
- void (*func_beg) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
- void (*func_end) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
- void (*stmt_beg) (PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt);
- void (*stmt_end) (PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt);
+ void (*func_setup) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
+ void (*func_beg) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
+ void (*func_end) (PLpgSQL_execstate *estate, PLpgSQL_function *func);
+ void (*stmt_beg) (PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt);
+ void (*stmt_end) (PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt);
/* Function pointers set by PL/pgSQL itself */
- void (*error_callback) (void *arg);
- void (*assign_expr) (PLpgSQL_execstate *estate, PLpgSQL_datum *target,
- PLpgSQL_expr *expr);
+ void (*error_callback) (void *arg);
+ void (*assign_expr) (PLpgSQL_execstate *estate, PLpgSQL_datum *target,
+ PLpgSQL_expr *expr);
} PLpgSQL_plugin;
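Concretely, a debugger or profiler library fills in this struct and publishes it through the rendezvous variable from its _PG_init(), clearing it again in _PG_fini(). A minimal sketch of such a plugin shell (the my_* callbacks are hypothetical):

static void my_func_beg(PLpgSQL_execstate *estate, PLpgSQL_function *func);
static void my_stmt_beg(PLpgSQL_execstate *estate, PLpgSQL_stmt *stmt);

static PLpgSQL_plugin my_plugin = {
	NULL,				/* func_setup */
	my_func_beg,		/* func_beg */
	NULL,				/* func_end */
	my_stmt_beg,		/* stmt_beg */
	NULL				/* stmt_end */
};

void
_PG_init(void)
{
	PLpgSQL_plugin **plugin_ptr;

	plugin_ptr = (PLpgSQL_plugin **) find_rendezvous_variable("PLpgSQL_plugin");
	*plugin_ptr = &my_plugin;
}

void
_PG_fini(void)
{
	PLpgSQL_plugin **plugin_ptr;

	plugin_ptr = (PLpgSQL_plugin **) find_rendezvous_variable("PLpgSQL_plugin");
	*plugin_ptr = NULL;
}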
/**********************************************************************
* plpython.c - python as a procedural language for PostgreSQL
*
- * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.88 2006/09/16 13:35:49 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.89 2006/10/04 00:30:14 momjian Exp $
*
*********************************************************************
*/
bool fn_readonly;
PLyTypeInfo result; /* also used to store info for trigger tuple
* type */
- bool is_setof; /* true, if procedure returns result set */
- PyObject *setof; /* contents of result set. */
- char **argnames; /* Argument names */
+ bool is_setof; /* true, if procedure returns result set */
+ PyObject *setof; /* contents of result set. */
+ char **argnames; /* Argument names */
PLyTypeInfo args[FUNC_MAX_ARGS];
int nargs;
PyObject *code; /* compiled procedure code */
{
PyObject_HEAD
/* HeapTuple *tuples; */
- PyObject *nrows; /* number of rows returned by query */
+ PyObject * nrows; /* number of rows returned by query */
PyObject *rows; /* data rows, or None if no data returned */
PyObject *status; /* query status, SPI_OK_*, or SPI_ERR_* */
} PLyResultObject;
if (!PyString_Check(plrv))
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("unexpected return value from trigger procedure"),
+ errmsg("unexpected return value from trigger procedure"),
errdetail("Expected None or a String.")));
srv = PyString_AsString(plrv);
else if (pg_strcasecmp(srv, "OK") != 0)
{
/*
- * accept "OK" as an alternative to None; otherwise,
- * raise an error
+ * accept "OK" as an alternative to None; otherwise, raise an
+ * error
*/
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("unexpected return value from trigger procedure"),
+ errmsg("unexpected return value from trigger procedure"),
errdetail("Expected None, \"OK\", \"SKIP\", or \"MODIFY\".")));
}
}
modvalues[i] =
InputFunctionCall(&proc->result.out.r.atts[atti].typfunc,
src,
- proc->result.out.r.atts[atti].typioparam,
+ proc->result.out.r.atts[atti].typioparam,
tupdesc->attrs[atti]->atttypmod);
modnulls[i] = ' ';
modvalues[i] =
InputFunctionCall(&proc->result.out.r.atts[atti].typfunc,
NULL,
- proc->result.out.r.atts[atti].typioparam,
+ proc->result.out.r.atts[atti].typioparam,
tupdesc->attrs[atti]->atttypmod);
modnulls[i] = 'n';
}
*pltevent,
*pltwhen,
*pltlevel,
- *pltrelid,
- *plttablename,
- *plttableschema;
+ *pltrelid,
+ *plttablename,
+ *plttableschema;
PyObject *pltargs,
*pytnew,
*pytold;
PyDict_SetItemString(pltdata, "table_name", plttablename);
Py_DECREF(plttablename);
pfree(stroid);
-
+
stroid = SPI_getnspname(tdata->tg_relation);
plttableschema = PyString_FromString(stroid);
PyDict_SetItemString(pltdata, "table_schema", plttableschema);
Py_DECREF(plttableschema);
pfree(stroid);
-
+
if (TRIGGER_FIRED_BEFORE(tdata->tg_event))
pltwhen = PyString_FromString("BEFORE");
plargs = PLy_function_build_args(fcinfo, proc);
plrv = PLy_procedure_call(proc, "args", plargs);
if (!proc->is_setof)
- /* SETOF function parameters will be deleted when last row is returned */
+
+ /*
+ * SETOF function parameters will be deleted when last row is
+ * returned
+ */
PLy_function_delete_args(proc);
Assert(plrv != NULL);
Assert(!PLy_error_in_progress);
if (proc->is_setof)
{
- bool has_error = false;
- ReturnSetInfo *rsi = (ReturnSetInfo *)fcinfo->resultinfo;
+ bool has_error = false;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
if (proc->setof == NULL)
{
/* first time -- do checks and setup */
if (!rsi || !IsA(rsi, ReturnSetInfo) ||
- (rsi->allowedModes & SFRM_ValuePerCall) == 0)
+ (rsi->allowedModes & SFRM_ValuePerCall) == 0)
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("returned object can not be iterated"),
- errdetail("SETOF must be returned as iterable object")));
+ errdetail("SETOF must be returned as iterable object")));
}
/* Fetch next from iterator */
if (has_error)
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("error fetching next item from iterator")));
+ errmsg("error fetching next item from iterator")));
fcinfo->isnull = true;
- return (Datum)NULL;
+ return (Datum) NULL;
}
}
/*
- * If the function is declared to return void, the Python
- * return value must be None. For void-returning functions, we
- * also treat a None return value as a special "void datum"
- * rather than NULL (as is the case for non-void-returning
- * functions).
+ * If the function is declared to return void, the Python return value
+ * must be None. For void-returning functions, we also treat a None
+ * return value as a special "void datum" rather than NULL (as is the
+ * case for non-void-returning functions).
*/
if (proc->result.out.d.typoid == VOIDOID)
{
if (plrv != Py_None)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("invalid return value from plpython function"),
+ errmsg("invalid return value from plpython function"),
errdetail("Functions returning type \"void\" must return None.")));
fcinfo->isnull = false;
fcinfo->isnull = true;
if (proc->result.is_rowtype < 1)
rv = InputFunctionCall(&proc->result.out.d.typfunc,
- NULL,
- proc->result.out.d.typioparam,
- -1);
+ NULL,
+ proc->result.out.d.typioparam,
+ -1);
else
/* Tuple as None */
rv = (Datum) NULL;
}
else if (proc->result.is_rowtype >= 1)
{
- HeapTuple tuple = NULL;
+ HeapTuple tuple = NULL;
if (PySequence_Check(plrv))
/* composite type as sequence (tuple, list etc) */
}
if (PyList_SetItem(args, i, arg) == -1 ||
- (proc->argnames &&
- PyDict_SetItemString(proc->globals, proc->argnames[i], arg) == -1))
+ (proc->argnames &&
+ PyDict_SetItemString(proc->globals, proc->argnames[i], arg) == -1))
PLy_elog(ERROR, "problem setting up arguments for \"%s\"", proc->proname);
arg = NULL;
}
static void
-PLy_function_delete_args(PLyProcedure *proc)
+PLy_function_delete_args(PLyProcedure * proc)
{
- int i;
+ int i;
if (!proc->argnames)
return;
- for (i = 0; i < proc->nargs; i++)
+ for (i = 0; i < proc->nargs; i++)
PyDict_DelItemString(proc->globals, proc->argnames[i]);
}
int i,
rv;
Datum argnames;
- Datum *elems;
- int nelems;
+ Datum *elems;
+ int nelems;
procStruct = (Form_pg_proc) GETSTRUCT(procTup);
if (rvTypeStruct->typtype == 'c')
{
- /* Tuple: set up later, during first call to PLy_function_handler */
+ /*
+ * Tuple: set up later, during first call to
+ * PLy_function_handler
+ */
proc->result.out.d.typoid = procStruct->prorettype;
proc->result.is_rowtype = 2;
}
}
/*
- * now get information required for input conversion of the procedure's
- * arguments.
+ * now get information required for input conversion of the
+ * procedure's arguments.
*/
proc->nargs = fcinfo->nargs;
if (proc->nargs)
if (!isnull)
{
deconstruct_array(DatumGetArrayTypeP(argnames), TEXTOID, -1, false, 'i',
- &elems, NULL, &nelems);
+ &elems, NULL, &nelems);
if (nelems != proc->nargs)
elog(ERROR,
- "proargnames must have the same number of elements "
- "as the function has arguments");
- proc->argnames = (char **) PLy_malloc(sizeof(char *)*proc->nargs);
+ "proargnames must have the same number of elements "
+ "as the function has arguments");
+ proc->argnames = (char **) PLy_malloc(sizeof(char *) * proc->nargs);
}
}
for (i = 0; i < fcinfo->nargs; i++)
proc->globals = PyDict_Copy(PLy_interp_globals);
/*
- * SD is private preserved data between calls. GD is global data
- * shared by all functions
+ * SD is private preserved data between calls. GD is global data shared by
+ * all functions
*/
proc->statics = PyDict_New();
PyDict_SetItemString(proc->globals, "SD", proc->statics);
static HeapTuple
-PLyMapping_ToTuple(PLyTypeInfo *info, PyObject *mapping)
+PLyMapping_ToTuple(PLyTypeInfo * info, PyObject * mapping)
{
TupleDesc desc;
HeapTuple tuple;
- Datum *values;
- char *nulls;
- int i;
+ Datum *values;
+ char *nulls;
+ int i;
Assert(PyMapping_Check(mapping));
Assert(info->is_rowtype == 1);
/* Build tuple */
- values = palloc(sizeof(Datum)*desc->natts);
- nulls = palloc(sizeof(char)*desc->natts);
- for (i = 0; i < desc->natts; ++i)
+ values = palloc(sizeof(Datum) * desc->natts);
+ nulls = palloc(sizeof(char) * desc->natts);
+ for (i = 0; i < desc->natts; ++i)
{
- char *key;
- PyObject *value,
- *so;
+ char *key;
+ PyObject *value,
+ *so;
key = NameStr(desc->attrs[i]->attname);
value = so = NULL;
}
else if (value)
{
- char *valuestr;
+ char *valuestr;
so = PyObject_Str(value);
if (so == NULL)
valuestr = PyString_AsString(so);
values[i] = InputFunctionCall(&info->out.r.atts[i].typfunc
- , valuestr
- , info->out.r.atts[i].typioparam
- , -1);
+ ,valuestr
+ ,info->out.r.atts[i].typioparam
+ ,-1);
Py_DECREF(so);
so = NULL;
nulls[i] = ' ';
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("no mapping found with key \"%s\"", key),
errhint("to return null in specific column, "
- "add value None to map with key named after column")));
+ "add value None to map with key named after column")));
Py_XDECREF(value);
value = NULL;
static HeapTuple
-PLySequence_ToTuple(PLyTypeInfo *info, PyObject *sequence)
+PLySequence_ToTuple(PLyTypeInfo * info, PyObject * sequence)
{
TupleDesc desc;
HeapTuple tuple;
- Datum *values;
- char *nulls;
- int i;
+ Datum *values;
+ char *nulls;
+ int i;
Assert(PySequence_Check(sequence));
/*
* Check that sequence length is exactly same as PG tuple's. We actually
- * can ignore exceeding items or assume missing ones as null but to
- * avoid plpython developer's errors we are strict here
+ * can ignore exceeding items or assume missing ones as null but to avoid
+ * plpython developer's errors we are strict here
*/
desc = lookup_rowtype_tupdesc(info->out.d.typoid, -1);
if (PySequence_Length(sequence) != desc->natts)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("returned sequence's length must be same as tuple's length")));
+ errmsg("returned sequence's length must be same as tuple's length")));
if (info->is_rowtype == 2)
PLy_output_tuple_funcs(info, desc);
Assert(info->is_rowtype == 1);
/* Build tuple */
- values = palloc(sizeof(Datum)*desc->natts);
- nulls = palloc(sizeof(char)*desc->natts);
- for (i = 0; i < desc->natts; ++i)
+ values = palloc(sizeof(Datum) * desc->natts);
+ nulls = palloc(sizeof(char) * desc->natts);
+ for (i = 0; i < desc->natts; ++i)
{
- PyObject *value,
- *so;
+ PyObject *value,
+ *so;
value = so = NULL;
PG_TRY();
}
else if (value)
{
- char *valuestr;
+ char *valuestr;
so = PyObject_Str(value);
if (so == NULL)
PLy_elog(ERROR, "can't convert sequence type");
valuestr = PyString_AsString(so);
values[i] = InputFunctionCall(&info->out.r.atts[i].typfunc
- , valuestr
- , info->out.r.atts[i].typioparam
- , -1);
+ ,valuestr
+ ,info->out.r.atts[i].typioparam
+ ,-1);
Py_DECREF(so);
so = NULL;
nulls[i] = ' ';
static HeapTuple
-PLyObject_ToTuple(PLyTypeInfo *info, PyObject *object)
+PLyObject_ToTuple(PLyTypeInfo * info, PyObject * object)
{
TupleDesc desc;
HeapTuple tuple;
- Datum *values;
- char *nulls;
- int i;
+ Datum *values;
+ char *nulls;
+ int i;
desc = lookup_rowtype_tupdesc(info->out.d.typoid, -1);
if (info->is_rowtype == 2)
Assert(info->is_rowtype == 1);
/* Build tuple */
- values = palloc(sizeof(Datum)*desc->natts);
- nulls = palloc(sizeof(char)*desc->natts);
- for (i = 0; i < desc->natts; ++i)
+ values = palloc(sizeof(Datum) * desc->natts);
+ nulls = palloc(sizeof(char) * desc->natts);
+ for (i = 0; i < desc->natts; ++i)
{
- char *key;
- PyObject *value,
- *so;
+ char *key;
+ PyObject *value,
+ *so;
key = NameStr(desc->attrs[i]->attname);
value = so = NULL;
}
else if (value)
{
- char *valuestr;
+ char *valuestr;
so = PyObject_Str(value);
if (so == NULL)
PLy_elog(ERROR, "can't convert object type");
valuestr = PyString_AsString(so);
values[i] = InputFunctionCall(&info->out.r.atts[i].typfunc
- , valuestr
- , info->out.r.atts[i].typioparam
- , -1);
+ ,valuestr
+ ,info->out.r.atts[i].typioparam
+ ,-1);
Py_DECREF(so);
so = NULL;
nulls[i] = ' ';
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("no attribute named \"%s\"", key),
errhint("to return null in specific column, "
- "let returned object to have attribute named "
- "after column with value None")));
+ "let returned object to have attribute named "
+ "after column with value None")));
Py_XDECREF(value);
value = NULL;
PG_TRY();
{
- char *sv = PyString_AsString(so);
+ char *sv = PyString_AsString(so);
plan->values[i] =
InputFunctionCall(&(plan->args[i].out.d.typfunc),
* pltcl.c - PostgreSQL support for Tcl as
* procedural language (PL)
*
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.107 2006/08/27 23:47:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.108 2006/10/04 00:30:14 momjian Exp $
*
**********************************************************************/
pfree(stroid);
/* The name of the table the trigger is acting on: TG_table_name */
- stroid = SPI_getrelname(trigdata->tg_relation);
+ stroid = SPI_getrelname(trigdata->tg_relation);
Tcl_DStringAppendElement(&tcl_cmd, stroid);
- pfree(stroid);
-
+ pfree(stroid);
+
/* The schema of the table the trigger is acting on: TG_table_schema */
- stroid = SPI_getnspname(trigdata->tg_relation);
+ stroid = SPI_getnspname(trigdata->tg_relation);
Tcl_DStringAppendElement(&tcl_cmd, stroid);
- pfree(stroid);
-
+ pfree(stroid);
+
/* A list of attribute names for argument TG_relatts */
Tcl_DStringAppendElement(&tcl_trigtup, "");
for (i = 0; i < tupdesc->natts; i++)
fmgr_info(typinput, &finfo);
UTF_BEGIN;
modvalues[attnum - 1] = InputFunctionCall(&finfo,
- (char *) UTF_U2E(ret_value),
+ (char *) UTF_U2E(ret_value),
typioparam,
- tupdesc->attrs[attnum - 1]->atttypmod);
+ tupdesc->attrs[attnum - 1]->atttypmod);
UTF_END;
}
{
UTF_BEGIN;
argvalues[j] = InputFunctionCall(&qdesc->arginfuncs[j],
- (char *) UTF_U2E(callargs[j]),
+ (char *) UTF_U2E(callargs[j]),
qdesc->argtypioparams[j],
-1);
UTF_END;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/dirent.c,v 1.2 2006/06/26 12:58:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/dirent.c,v 1.3 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
-struct DIR {
- char *dirname;
- struct dirent ret; /* Used to return to caller */
- HANDLE handle;
+struct DIR
+{
+ char *dirname;
+ struct dirent ret; /* Used to return to caller */
+ HANDLE handle;
};
-DIR* opendir(const char *dirname)
+DIR *
+opendir(const char *dirname)
{
- DWORD attr;
- DIR *d;
+ DWORD attr;
+ DIR *d;
/* Make sure it is a directory */
attr = GetFileAttributes(dirname);
- if (attr == INVALID_FILE_ATTRIBUTES)
+ if (attr == INVALID_FILE_ATTRIBUTES)
{
errno = ENOENT;
return NULL;
errno = ENOMEM;
return NULL;
}
- d->dirname = malloc(strlen(dirname)+4);
+ d->dirname = malloc(strlen(dirname) + 4);
if (!d->dirname)
{
errno = ENOMEM;
return NULL;
}
strcpy(d->dirname, dirname);
- if (d->dirname[strlen(d->dirname)-1] != '/' &&
- d->dirname[strlen(d->dirname)-1] != '\\')
- strcat(d->dirname,"\\"); /* Append backslash if not already there */
- strcat(d->dirname,"*"); /* Search for entries named anything */
+ if (d->dirname[strlen(d->dirname) - 1] != '/' &&
+ d->dirname[strlen(d->dirname) - 1] != '\\')
+ strcat(d->dirname, "\\"); /* Append backslash if not already
+ * there */
+ strcat(d->dirname, "*"); /* Search for entries named anything */
d->handle = INVALID_HANDLE_VALUE;
- d->ret.d_ino = 0; /* no inodes on win32 */
- d->ret.d_reclen = 0; /* not used on win32 */
+ d->ret.d_ino = 0; /* no inodes on win32 */
+ d->ret.d_reclen = 0; /* not used on win32 */
return d;
}
-struct dirent* readdir(DIR * d)
+struct dirent *
+readdir(DIR *d)
{
WIN32_FIND_DATA fd;
return NULL;
}
}
- else
+ else
{
if (!FindNextFile(d->handle, &fd))
{
return NULL;
}
}
- strcpy(d->ret.d_name, fd.cFileName); /* Both strings are MAX_PATH long */
+ strcpy(d->ret.d_name, fd.cFileName); /* Both strings are MAX_PATH
+ * long */
d->ret.d_namlen = strlen(d->ret.d_name);
return &d->ret;
}
-int closedir(DIR *d)
+int
+closedir(DIR *d)
{
if (d->handle != INVALID_HANDLE_VALUE)
FindClose(d->handle);
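Together these give Win32 callers the familiar POSIX directory-scanning idiom. A small usage sketch, assuming dirname points at the directory to scan:

	DIR		   *dir;
	struct dirent *de;

	dir = opendir(dirname);
	if (dir == NULL)
		return -1;				/* errno was set by opendir() */

	while ((de = readdir(dir)) != NULL)
	{
		/* FindFirstFile reports "." and ".." too, so skip them */
		if (strcmp(de->d_name, ".") == 0 ||
			strcmp(de->d_name, "..") == 0)
			continue;
		/* ... process de->d_name ... */
	}
	closedir(dir);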
* Copyright (c) 2003-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/getaddrinfo.c,v 1.25 2006/07/16 20:28:01 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/getaddrinfo.c,v 1.26 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return "Not enough memory";
#endif
#ifdef EAI_NODATA
-#ifndef WIN32_ONLY_COMPILER /* MSVC complains because another case has the same value */
+#ifndef WIN32_ONLY_COMPILER /* MSVC complains because another case has the
+ * same value */
case EAI_NODATA:
return "No host data of that type was found";
#endif
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/isinf.c,v 1.9 2006/03/05 15:59:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/isinf.c,v 1.10 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return 0;
}
#endif
+
#endif
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/open.c,v 1.16 2006/10/03 20:44:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/open.c,v 1.17 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if ((fd = _open_osfhandle((long) h, fileFlags & O_APPEND)) < 0)
CloseHandle(h); /* will not affect errno */
else if (fileFlags & (O_TEXT | O_BINARY) &&
- _setmode(fd, fileFlags & (O_TEXT | O_BINARY)) < 0)
+ _setmode(fd, fileFlags & (O_TEXT | O_BINARY)) < 0)
{
_close(fd);
return -1;
FILE *
pgwin32_fopen(const char *fileName, const char *mode)
{
- int openmode = 0;
- int fd;
-
+ int openmode = 0;
+ int fd;
+
if (strstr(mode, "r+"))
openmode |= O_RDWR;
else if (strchr(mode, 'r'))
openmode |= O_BINARY;
if (strchr(mode, 't'))
openmode |= O_TEXT;
-
+
fd = pgwin32_open(fileName, openmode);
if (fd == -1)
return NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/path.c,v 1.69 2006/09/27 18:40:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/path.c,v 1.70 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
const char *
get_progname(const char *argv0)
{
- const char *nodir_name;
- char *progname;
+ const char *nodir_name;
+ char *progname;
nodir_name = last_dir_separator(argv0);
if (nodir_name)
nodir_name = skip_drive(argv0);
/*
- * Make a copy in case argv[0] is modified by ps_status.
- * Leaks memory, but called only once.
+ * Make a copy in case argv[0] is modified by ps_status. Leaks memory, but
+ * called only once.
*/
progname = strdup(nodir_name);
if (progname == NULL)
{
fprintf(stderr, "%s: out of memory\n", nodir_name);
- exit(1); /* This could exit the postmaster */
+ exit(1); /* This could exit the postmaster */
}
#if defined(__CYGWIN__) || defined(WIN32)
/* strip ".exe" suffix, regardless of case */
if (strlen(progname) > sizeof(EXE) - 1 &&
- pg_strcasecmp(progname + strlen(progname) - (sizeof(EXE) - 1), EXE) == 0)
+ pg_strcasecmp(progname + strlen(progname) - (sizeof(EXE) - 1), EXE) == 0)
progname[strlen(progname) - (sizeof(EXE) - 1)] = '\0';
#endif
*/
tail_start = (int) strlen(ret_path) - tail_len;
if (tail_start > 0 &&
- IS_DIR_SEP(ret_path[tail_start-1]) &&
+ IS_DIR_SEP(ret_path[tail_start - 1]) &&
dir_strcmp(ret_path + tail_start, bin_path + prefix_len) == 0)
{
ret_path[tail_start] = '\0';
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/pgsleep.c,v 1.8 2006/07/16 20:17:04 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/pgsleep.c,v 1.9 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
}
-#endif /* defined(FRONTEND) || !defined(WIN32) */
+#endif /* defined(FRONTEND) || !defined(WIN32) */
*
* CAUTION: if you change this file, see also qsort.c
*
- * $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.1 2006/10/03 22:18:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.2 2006/10/04 00:30:14 momjian Exp $
*/
/* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
static char *med3(char *a, char *b, char *c,
- qsort_arg_comparator cmp, void *arg);
+ qsort_arg_comparator cmp, void *arg);
static void swapfunc(char *, char *, size_t, int);
#define min(a, b) ((a) < (b) ? (a) : (b))
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $PostgreSQL: pgsql/src/port/snprintf.c,v 1.31 2005/12/05 21:57:00 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/snprintf.c,v 1.32 2006/10/04 00:30:14 momjian Exp $
*/
#include "c.h"
* platforms. This implementation is compatible with the Single Unix Spec:
*
* 1. -1 is returned only if processing is abandoned due to an invalid
- * parameter, such as incorrect format string. (Although not required by
+ * parameter, such as incorrect format string. (Although not required by
* the spec, this happens only when no characters have yet been transmitted
* to the destination.)
*
int
pg_vsnprintf(char *str, size_t count, const char *fmt, va_list args)
{
- PrintfTarget target;
+ PrintfTarget target;
if (str == NULL || count == 0)
return 0;
static int
pg_vsprintf(char *str, const char *fmt, va_list args)
{
- PrintfTarget target;
+ PrintfTarget target;
if (str == NULL)
return 0;
static int
pg_vfprintf(FILE *stream, const char *fmt, va_list args)
{
- PrintfTarget target;
+ PrintfTarget target;
char buffer[1024]; /* size is arbitrary */
if (stream == NULL)
static void
flushbuffer(PrintfTarget *target)
{
- size_t nc = target->bufptr - target->bufstart;
+ size_t nc = target->bufptr - target->bufstart;
if (nc > 0)
target->nchars += fwrite(target->bufstart, 1, nc, target->stream);
PrintfTarget *target);
static void fmtchar(int value, int leftjust, int minlen, PrintfTarget *target);
static void fmtfloat(double value, char type, int forcesign,
- int leftjust, int minlen, int zpad, int precision, int pointflag,
- PrintfTarget *target);
+ int leftjust, int minlen, int zpad, int precision, int pointflag,
+ PrintfTarget *target);
static void dostr(const char *str, int slen, PrintfTarget *target);
static void dopr_outch(int c, PrintfTarget *target);
static int adjust_sign(int is_negative, int forcesign, int *signvalue);
double fvalue;
char *strvalue;
int i;
- PrintfArgType argtypes[NL_ARGMAX+1];
- PrintfArgValue argvalues[NL_ARGMAX+1];
+ PrintfArgType argtypes[NL_ARGMAX + 1];
+ PrintfArgValue argvalues[NL_ARGMAX + 1];
/*
* Parse the format string to determine whether there are %n$ format
longflag = longlongflag = pointflag = 0;
fmtpos = accum = 0;
afterstar = false;
- nextch1:
+nextch1:
ch = *format++;
if (ch == '\0')
break; /* illegal, but we don't complain */
goto nextch1;
case '*':
if (afterstar)
- have_non_dollar = true; /* multiple stars */
+ have_non_dollar = true; /* multiple stars */
afterstar = true;
accum = 0;
goto nextch1;
case '%':
break;
}
+
/*
* If we finish the spec with afterstar still set, there's a
* non-dollar star in there.
longflag = longlongflag = pointflag = 0;
fmtpos = accum = 0;
have_star = afterstar = false;
- nextch2:
+nextch2:
ch = *format++;
if (ch == '\0')
break; /* illegal, but we don't complain */
else
{
/* fetch and process value now */
- int starval = va_arg(args, int);
+ int starval = va_arg(args, int);
if (pointflag)
{
if (afterstar)
{
/* fetch and process star value */
- int starval = argvalues[accum].i;
+ int starval = argvalues[accum].i;
if (pointflag)
{
else
{
/* make integer string */
- uint64 uvalue = (uint64) value;
+ uint64 uvalue = (uint64) value;
do
{
{
while (slen > 0)
{
- int avail;
+ int avail;
if (target->bufend != NULL)
avail = target->bufend - target->bufptr;
- else
+ else
avail = slen;
if (avail <= 0)
{
/* buffer full, can we dump to stream? */
if (target->stream == NULL)
- return; /* no, lose the data */
+ return; /* no, lose the data */
flushbuffer(target);
continue;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.17 2006/06/14 16:49:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.18 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
termout = fopen(DEVTTY, "w");
if (!termin || !termout
#ifdef WIN32
- /* See DEVTTY comment for msys */
+ /* See DEVTTY comment for msys */
|| (getenv("OSTYPE") && strcmp(getenv("OSTYPE"), "msys") == 0)
#endif
)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/strlcpy.c,v 1.2 2006/10/02 23:58:59 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/strlcpy.c,v 1.3 2006/10/04 00:30:14 momjian Exp $
*
* This file was taken from OpenBSD and is used on platforms that don't
* provide strlcpy(). The OpenBSD copyright terms follow.
/*
- * Copy src to string dst of size siz. At most siz-1 characters
- * will be copied. Always NUL terminates (unless siz == 0).
+ * Copy src to string dst of size siz. At most siz-1 characters
+ * will be copied. Always NUL terminates (unless siz == 0).
* Returns strlen(src); if retval >= siz, truncation occurred.
* Function creation history: http://www.gratisoft.us/todd/papers/strlcpy.html
*/
size_t
strlcpy(char *dst, const char *src, size_t siz)
{
- char *d = dst;
+ char *d = dst;
const char *s = src;
- size_t n = siz;
+ size_t n = siz;
/* Copy as many bytes as will fit */
- if (n != 0) {
- while (--n != 0) {
+ if (n != 0)
+ {
+ while (--n != 0)
+ {
if ((*d++ = *s++) == '\0')
break;
}
}
/* Not enough room in dst, add NUL and traverse rest of src */
- if (n == 0) {
+ if (n == 0)
+ {
if (siz != 0)
- *d = '\0'; /* NUL-terminate dst */
+ *d = '\0'; /* NUL-terminate dst */
while (*s++)
;
}
- return(s - src - 1); /* count does not include NUL */
+ return (s - src - 1); /* count does not include NUL */
}
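Because the return value is the length of src rather than of the copy, truncation is detected with a single comparison at the call site; a short usage sketch (src is assumed to be the NUL-terminated input string):

	char		buf[64];

	if (strlcpy(buf, src, sizeof(buf)) >= sizeof(buf))
	{
		/* src did not fit; buf holds the first 63 bytes plus the NUL */
		fprintf(stderr, "value too long, truncated to \"%s\"\n", buf);
	}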
* before calling this function, and then errno != 0 can be tested
* after the function completes.
*/
-
+
/*
* Convert a string to a long integer.
*
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.22 2006/09/24 17:10:18 tgl Exp $
+ * $PostgreSQL: pgsql/src/test/regress/pg_regress.c,v 1.23 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* simple list of strings */
typedef struct _stringlist
{
- char *str;
+ char *str;
struct _stringlist *next;
-} _stringlist;
+} _stringlist;
/* for resultmap we need a list of pairs of strings */
typedef struct _resultmap
{
- char *test;
- char *resultfile;
+ char *test;
+ char *resultfile;
struct _resultmap *next;
-} _resultmap;
+} _resultmap;
/*
* Values obtained from pg_config_paths.h and Makefile. The PG installation
static char *datadir = PGSHAREDIR;
static char *host_platform = HOST_TUPLE;
static char *makeprog = MAKEPROG;
+
#ifndef WIN32 /* not used in WIN32 case */
static char *shellprog = SHELLPROG;
#endif
static char *inputdir = ".";
static char *outputdir = ".";
static _stringlist *loadlanguage = NULL;
-static int max_connections = 0;
+static int max_connections = 0;
static char *encoding = NULL;
static _stringlist *schedulelist = NULL;
static _stringlist *extra_tests = NULL;
static char *temp_install = NULL;
static char *top_builddir = NULL;
-static int temp_port = 65432;
+static int temp_port = 65432;
static bool nolocale = false;
static char *psqldir = NULL;
static char *hostname = NULL;
-static int port = -1;
+static int port = -1;
static char *user = NULL;
/* internal variables */
static PID_TYPE postmaster_pid = INVALID_PID;
static bool postmaster_running = false;
-static int success_count = 0;
-static int fail_count = 0;
-static int fail_ignore_count = 0;
+static int success_count = 0;
+static int fail_count = 0;
+static int fail_ignore_count = 0;
static void
header(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
   the supplied arguments. */
__attribute__((format(printf, 1, 2)));
static void
-psql_command(const char *database, const char *query, ...)
+psql_command(const char *database, const char *query,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
__attribute__((format(printf, 2, 3)));
* Add an item at the end of a stringlist.
*/
static void
-add_stringlist_item(_stringlist **listhead, const char *str)
+add_stringlist_item(_stringlist ** listhead, const char *str)
{
_stringlist *newentry = malloc(sizeof(_stringlist));
_stringlist *oldentry;
else
{
for (oldentry = *listhead; oldentry->next; oldentry = oldentry->next)
- /*skip*/;
+ /* skip */ ;
oldentry->next = newentry;
}
}
if (postmaster_running)
{
/* We use pg_ctl to issue the kill and wait for stop */
- char buf[MAXPGPATH * 2];
+ char buf[MAXPGPATH * 2];
/* On Windows, system() seems not to force fflush, so... */
fflush(stdout);
* Scan resultmap file to find which platform-specific expected files to use.
*
* The format of each line of the file is
- * testname/hostplatformpattern=substitutefile
+ * testname/hostplatformpattern=substitutefile
* where the hostplatformpattern is evaluated per the rules of expr(1),
* namely, it is a standard regular expression with an implicit ^ at the start.
* (We currently support only a very limited subset of regular expressions,
* see string_matches_pattern() above.) What hostplatformpattern will be
- * matched against is the config.guess output. (In the shell-script version,
+ * matched against is the config.guess output. (In the shell-script version,
* we also provided an indication of whether gcc or another compiler was in
* use, but that facility isn't used anymore.)
*/
static void
load_resultmap(void)
{
- char buf[MAXPGPATH];
- FILE *f;
+ char buf[MAXPGPATH];
+ FILE *f;
/* scan the file ... */
snprintf(buf, sizeof(buf), "%s/resultmap", inputdir);
- f = fopen(buf,"r");
+ f = fopen(buf, "r");
if (!f)
{
/* OK if it doesn't exist, else complain */
while (fgets(buf, sizeof(buf), f))
{
- char *platform;
- char *expected;
- int i;
+ char *platform;
+ char *expected;
+ int i;
/* strip trailing whitespace, especially the newline */
i = strlen(buf);
- while (i > 0 && isspace((unsigned char) buf[i-1]))
+ while (i > 0 && isspace((unsigned char) buf[i - 1]))
buf[--i] = '\0';
/* parse out the line fields */
*expected++ = '\0';
/*
- * if it's for current platform, save it in resultmap list.
- * Note: by adding at the front of the list, we ensure that in
- * ambiguous cases, the last match in the resultmap file is used.
- * This mimics the behavior of the old shell script.
+ * if it's for current platform, save it in resultmap list. Note: by
+ * adding at the front of the list, we ensure that in ambiguous cases,
+ * the last match in the resultmap file is used. This mimics the
+ * behavior of the old shell script.
*/
if (string_matches_pattern(host_platform, platform))
{
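
/*
 * Illustrative sketch, not part of the diff above: a resultmap line has
 * the form "testname/hostplatformpattern=substitutefile", and the loader
 * splits it into those three fields.  A minimal standalone split of the
 * same kind (the strchr()-based mechanics and the sample line are
 * assumptions for the example, not taken from a real resultmap file):
 */
#include <stdio.h>
#include <string.h>

int
main(void)
{
    char        buf[] = "sometest/some-platform-pattern=sometest-alt";
    char       *platform = strchr(buf, '/');
    char       *expected;

    if (platform == NULL)
        return 1;
    *platform++ = '\0';
    expected = strchr(platform, '=');
    if (expected == NULL)
        return 1;
    *expected++ = '\0';
    printf("test=%s pattern=%s file=%s\n", buf, platform, expected);
    return 0;
}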
static void
doputenv(const char *var, const char *val)
{
- char *s = malloc(strlen(var)+strlen(val)+2);
+ char *s = malloc(strlen(var) + strlen(val) + 2);
sprintf(s, "%s=%s", var, val);
putenv(s);
static void
add_to_path(const char *pathname, char separator, const char *addval)
{
- char *oldval = getenv(pathname);
- char *newval;
+ char *oldval = getenv(pathname);
+ char *newval;
if (!oldval || !oldval[0])
{
else
{
newval = malloc(strlen(pathname) + strlen(addval) + strlen(oldval) + 3);
- sprintf(newval,"%s=%s%c%s",pathname,addval,separator,oldval);
+ sprintf(newval, "%s=%s%c%s", pathname, addval, separator, oldval);
}
putenv(newval);
}
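
/*
 * Illustrative sketch, not part of the diff above: add_to_path() builds a
 * "NAME=newval<separator>oldval" string and hands it to putenv(), so the
 * new directory ends up prepended.  A standalone version of the same idea
 * (the helper name and directory are made up for the example):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
prepend_to_path(const char *name, char sep, const char *addval)
{
    const char *oldval = getenv(name);
    char       *newval;

    if (!oldval || !oldval[0])
    {
        newval = malloc(strlen(name) + strlen(addval) + 2);
        sprintf(newval, "%s=%s", name, addval);
    }
    else
    {
        newval = malloc(strlen(name) + strlen(addval) + strlen(oldval) + 3);
        sprintf(newval, "%s=%s%c%s", name, addval, sep, oldval);
    }
    putenv(newval);             /* string stays owned by the environment */
}

int
main(void)
{
    prepend_to_path("LD_LIBRARY_PATH", ':', "/tmp/example-libdir");
    printf("LD_LIBRARY_PATH=%s\n", getenv("LD_LIBRARY_PATH"));
    return 0;
}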
static void
initialize_environment(void)
{
- char *tmp;
+ char *tmp;
/*
* Clear out any non-C locale settings
if (temp_install)
{
/*
- * Clear out any environment vars that might cause psql to connect
- * to the wrong postmaster, or otherwise behave in nondefault ways.
- * (Note we also use psql's -X switch consistently, so that ~/.psqlrc
- * files won't mess things up.) Also, set PGPORT to the temp port,
- * and set or unset PGHOST depending on whether we are using TCP or
- * Unix sockets.
+ * Clear out any environment vars that might cause psql to connect to
+ * the wrong postmaster, or otherwise behave in nondefault ways. (Note
+ * we also use psql's -X switch consistently, so that ~/.psqlrc files
+ * won't mess things up.) Also, set PGPORT to the temp port, and set
+ * or unset PGHOST depending on whether we are using TCP or Unix
+ * sockets.
*/
unsetenv("PGDATABASE");
unsetenv("PGUSER");
unsetenv("PGHOSTADDR");
if (port != -1)
{
- char s[16];
+ char s[16];
- sprintf(s,"%d",port);
- doputenv("PGPORT",s);
+ sprintf(s, "%d", port);
+ doputenv("PGPORT", s);
}
/*
* Set up shared library paths to include the temp install.
*
* LD_LIBRARY_PATH covers many platforms. DYLD_LIBRARY_PATH works on
- * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
+ * Darwin, and maybe other Mach-based systems. LIBPATH is for AIX.
* Windows needs shared libraries in PATH (only those linked into
- * executables, not dlopen'ed ones).
- * Feel free to account for others as well.
+ * executables, not dlopen'ed ones). Feel free to account for others
+ * as well.
*/
add_to_path("LD_LIBRARY_PATH", ':', libdir);
add_to_path("DYLD_LIBRARY_PATH", ':', libdir);
}
if (port != -1)
{
- char s[16];
+ char s[16];
- sprintf(s,"%d",port);
- doputenv("PGPORT",s);
+ sprintf(s, "%d", port);
+ doputenv("PGPORT", s);
}
if (user != NULL)
doputenv("PGUSER", user);
* Since we use system(), this doesn't return until the operation finishes
*/
static void
-psql_command(const char *database, const char *query, ...)
+psql_command(const char *database, const char *query,...)
{
- char query_formatted[1024];
- char query_escaped[2048];
- char psql_cmd[MAXPGPATH + 2048];
- va_list args;
- char *s;
- char *d;
+ char query_formatted[1024];
+ char query_escaped[2048];
+ char psql_cmd[MAXPGPATH + 2048];
+ va_list args;
+ char *s;
+ char *d;
/* Generate the query with insertion of sprintf arguments */
va_start(args, query);
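
/*
 * Illustrative sketch, not part of the diff above: psql_command() is a
 * varargs wrapper that first formats the query with the caller's
 * arguments before escaping it and handing it to psql.  The formatting
 * step on its own looks like this (the helper name is made up for the
 * example):
 */
#include <stdarg.h>
#include <stdio.h>

static void
format_command(char *dst, size_t dstlen, const char *fmt,...)
{
    va_list     args;

    va_start(args, fmt);
    vsnprintf(dst, dstlen, fmt, args);
    va_end(args);
}

int
main(void)
{
    char        query[1024];

    format_command(query, sizeof(query),
                   "DROP DATABASE IF EXISTS \"%s\"", "regression");
    printf("%s\n", query);
    return 0;
}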
spawn_process(const char *cmdline)
{
#ifndef WIN32
- pid_t pid;
+ pid_t pid;
/*
- * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
+ * Must flush I/O buffers before fork. Ideally we'd use fflush(NULL) here
* ... does anyone still care about systems where that doesn't work?
*/
fflush(stdout);
/*
* In child
*
- * Instead of using system(), exec the shell directly, and tell it
- * to "exec" the command too. This saves two useless processes
- * per parallel test case.
+ * Instead of using system(), exec the shell directly, and tell it to
+ * "exec" the command too. This saves two useless processes per
+ * parallel test case.
*/
- char *cmdline2 = malloc(strlen(cmdline) + 6);
+ char *cmdline2 = malloc(strlen(cmdline) + 6);
sprintf(cmdline2, "exec %s", cmdline);
execl(shellprog, shellprog, "-c", cmdline2, NULL);
/* in parent */
return pid;
#else
- char *cmdline2;
+ char *cmdline2;
STARTUPINFO si;
PROCESS_INFORMATION pi;
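
/*
 * Illustrative sketch, not part of the diff above: the non-Windows branch
 * of spawn_process() avoids system() by forking and exec'ing the shell
 * directly, with "exec " prefixed to the command so the shell replaces
 * itself instead of forking again.  A minimal standalone version (the
 * command string is just an example):
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int
main(void)
{
    const char *cmdline = "echo hello from the child";
    char       *cmdline2 = malloc(strlen(cmdline) + 6);
    pid_t       pid;

    sprintf(cmdline2, "exec %s", cmdline);

    fflush(stdout);             /* flush buffers before fork, as advised above */
    pid = fork();
    if (pid == 0)
    {
        execl("/bin/sh", "sh", "-c", cmdline2, (char *) NULL);
        _exit(1);               /* reached only if execl() fails */
    }
    waitpid(pid, NULL, 0);
    return 0;
}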
static PID_TYPE
psql_start_test(const char *testname)
{
- PID_TYPE pid;
- char infile[MAXPGPATH];
- char outfile[MAXPGPATH];
- char psql_cmd[MAXPGPATH * 3];
+ PID_TYPE pid;
+ char infile[MAXPGPATH];
+ char outfile[MAXPGPATH];
+ char psql_cmd[MAXPGPATH * 3];
snprintf(infile, sizeof(infile), "%s/sql/%s.sql",
inputdir, testname);
static long
file_size(const char *file)
{
- long r;
- FILE *f = fopen(file,"r");
+ long r;
+ FILE *f = fopen(file, "r");
if (!f)
{
static int
file_line_count(const char *file)
{
- int c;
- int l = 0;
- FILE *f = fopen(file,"r");
+ int c;
+ int l = 0;
+ FILE *f = fopen(file, "r");
if (!f)
{
static bool
file_exists(const char *file)
{
- FILE *f = fopen(file, "r");
+ FILE *f = fopen(file, "r");
if (!f)
return false;
static int
run_diff(const char *cmd, const char *filename)
{
- int r;
+ int r;
r = system(cmd);
if (!WIFEXITED(r) || WEXITSTATUS(r) > 1)
exit_nicely(2);
}
#ifdef WIN32
+
/*
- * On WIN32, if the 'diff' command cannot be found, system() returns
- * 1, but produces nothing to stdout, so we check for that here.
+ * On WIN32, if the 'diff' command cannot be found, system() returns 1,
+ * but produces nothing to stdout, so we check for that here.
*/
if (WEXITSTATUS(r) == 1 && file_size(filename) <= 0)
{
exit_nicely(2);
}
#endif
-
+
return WEXITSTATUS(r);
}
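
/*
 * Illustrative sketch, not part of the diff above: run_diff() relies on
 * diff's exit-status convention -- 0 means no differences, 1 means
 * differences, anything above 1 means trouble -- which is why statuses
 * greater than 1 abort the run.  A standalone check of the same kind
 * (assumes a POSIX diff on the PATH):
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

int
main(void)
{
    int         r = system("diff /dev/null /dev/null > /dev/null");

    if (!WIFEXITED(r) || WEXITSTATUS(r) > 1)
        printf("diff failed to run\n");
    else if (WEXITSTATUS(r) == 1)
        printf("files differ\n");
    else
        printf("files are identical\n");
    return 0;
}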
results_differ(const char *testname)
{
const char *expectname;
- char resultsfile[MAXPGPATH];
- char expectfile[MAXPGPATH];
- char diff[MAXPGPATH];
- char cmd[MAXPGPATH * 3];
- char best_expect_file[MAXPGPATH];
+ char resultsfile[MAXPGPATH];
+ char expectfile[MAXPGPATH];
+ char diff[MAXPGPATH];
+ char cmd[MAXPGPATH * 3];
+ char best_expect_file[MAXPGPATH];
_resultmap *rm;
- FILE *difffile;
- int best_line_count;
- int i;
- int l;
-
+ FILE *difffile;
+ int best_line_count;
+ int i;
+ int l;
+
/* Check in resultmap if we should be looking at a different file */
expectname = testname;
for (rm = resultmap; rm != NULL; rm = rm->next)
}
}
- /*
- * fall back on the canonical results file if we haven't tried it yet
- * and haven't found a complete match yet.
+ /*
+ * fall back on the canonical results file if we haven't tried it yet and
+ * haven't found a complete match yet.
*/
if (strcmp(expectname, testname) != 0)
}
/*
- * Use the best comparison file to generate the "pretty" diff, which
- * we append to the diffs summary file.
+ * Use the best comparison file to generate the "pretty" diff, which we
+ * append to the diffs summary file.
*/
snprintf(cmd, sizeof(cmd),
SYSTEMQUOTE "diff %s \"%s\" \"%s\" >> \"%s\"" SYSTEMQUOTE,
* Note: it's OK to scribble on the pids array, but not on the names array
*/
static void
-wait_for_tests(PID_TYPE *pids, char **names, int num_tests)
+wait_for_tests(PID_TYPE * pids, char **names, int num_tests)
{
- int tests_left;
- int i;
+ int tests_left;
+ int i;
#ifdef WIN32
- PID_TYPE *active_pids = malloc(num_tests * sizeof(PID_TYPE));
+ PID_TYPE *active_pids = malloc(num_tests * sizeof(PID_TYPE));
memcpy(active_pids, pids, num_tests * sizeof(PID_TYPE));
#endif
tests_left = num_tests;
while (tests_left > 0)
{
- PID_TYPE p;
+ PID_TYPE p;
#ifndef WIN32
p = wait(NULL);
exit_nicely(2);
}
#else
- int r;
+ int r;
r = WaitForMultipleObjects(tests_left, active_pids, FALSE, INFINITE);
if (r < WAIT_OBJECT_0 || r >= WAIT_OBJECT_0 + tests_left)
p = active_pids[r - WAIT_OBJECT_0];
/* compact the active_pids array */
active_pids[r - WAIT_OBJECT_0] = active_pids[tests_left - 1];
-#endif /* WIN32 */
+#endif /* WIN32 */
- for (i=0; i < num_tests; i++)
+ for (i = 0; i < num_tests; i++)
{
if (p == pids[i])
{
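
/*
 * Illustrative sketch, not part of the diff above: the "compact the
 * active_pids array" step keeps the live handles contiguous for the next
 * WaitForMultipleObjects() call by overwriting the finished slot with the
 * last entry.  The same swap-with-last idiom on a plain int array:
 */
#include <stdio.h>

int
main(void)
{
    int         items[] = {11, 22, 33, 44};
    int         nitems = 4;
    int         finished = 1;   /* pretend items[1] just completed */

    items[finished] = items[nitems - 1];    /* overwrite with the last entry */
    nitems--;

    for (int i = 0; i < nitems; i++)
        printf("%d ", items[i]);            /* prints 11 44 33 */
    printf("\n");
    return 0;
}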
run_schedule(const char *schedule)
{
#define MAX_PARALLEL_TESTS 100
- char *tests[MAX_PARALLEL_TESTS];
- PID_TYPE pids[MAX_PARALLEL_TESTS];
+ char *tests[MAX_PARALLEL_TESTS];
+ PID_TYPE pids[MAX_PARALLEL_TESTS];
_stringlist *ignorelist = NULL;
- char scbuf[1024];
- FILE *scf;
- int line_num = 0;
+ char scbuf[1024];
+ FILE *scf;
+ int line_num = 0;
scf = fopen(schedule, "r");
if (!scf)
while (fgets(scbuf, sizeof(scbuf), scf))
{
- char *test = NULL;
- char *c;
- int num_tests;
- bool inword;
- int i;
+ char *test = NULL;
+ char *c;
+ int num_tests;
+ bool inword;
+ int i;
line_num++;
/* strip trailing whitespace, especially the newline */
i = strlen(scbuf);
- while (i > 0 && isspace((unsigned char) scbuf[i-1]))
+ while (i > 0 && isspace((unsigned char) scbuf[i - 1]))
scbuf[--i] = '\0';
if (scbuf[0] == '\0' || scbuf[0] == '#')
while (*c && isspace((unsigned char) *c))
c++;
add_stringlist_item(&ignorelist, c);
+
/*
* Note: ignore: lines do not run the test, they just say that
- * failure of this test when run later on is to be ignored.
- * A bit odd but that's how the shell-script version did it.
+ * failure of this test when run later on is to be ignored. A bit
+ * odd but that's how the shell-script version did it.
*/
continue;
}
}
else if (max_connections > 0 && max_connections < num_tests)
{
- int oldest = 0;
+ int oldest = 0;
status(_("parallel group (%d tests, in groups of %d): "),
num_tests, max_connections);
if (results_differ(tests[i]))
{
- bool ignore = false;
+ bool ignore = false;
_stringlist *sl;
for (sl = ignorelist; sl != NULL; sl = sl->next)
static void
run_single_test(const char *test)
{
- PID_TYPE pid;
+ PID_TYPE pid;
status(_("test %-20s ... "), test);
pid = psql_start_test(test);
static void
open_result_files(void)
{
- char file[MAXPGPATH];
- FILE *difffile;
+ char file[MAXPGPATH];
+ FILE *difffile;
/* create the log file (copy of running status output) */
snprintf(file, sizeof(file), "%s/regression.out", outputdir);
main(int argc, char *argv[])
{
_stringlist *sl;
- int c;
- int i;
- int option_index;
- char buf[MAXPGPATH * 4];
+ int c;
+ int i;
+ int option_index;
+ char buf[MAXPGPATH * 4];
static struct option long_options[] = {
{"help", no_argument, NULL, 'h'},
temp_install = strdup(optarg);
else
{
- char cwdbuf[MAXPGPATH];
+ char cwdbuf[MAXPGPATH];
if (!getcwd(cwdbuf, sizeof(cwdbuf)))
{
exit_nicely(2);
}
temp_install = malloc(strlen(cwdbuf) + strlen(optarg) + 2);
- sprintf(temp_install,"%s/%s", cwdbuf, optarg);
+ sprintf(temp_install, "%s/%s", cwdbuf, optarg);
}
canonicalize_path(temp_install);
break;
break;
case 12:
{
- int p = atoi(optarg);
+ int p = atoi(optarg);
/* Since Makefile isn't very bright, check port range */
if (p >= 1024 && p <= 65535)
if (directory_exists(temp_install))
{
header(_("removing existing temp installation"));
- rmtree(temp_install,true);
+ rmtree(temp_install, true);
}
header(_("creating temporary installation"));
}
/*
- * Wait till postmaster is able to accept connections (normally only
- * a second or so, but Cygwin is reportedly *much* slower). Don't
- * wait forever, however.
+ * Wait till postmaster is able to accept connections (normally only a
+ * second or so, but Cygwin is reportedly *much* slower). Don't wait
+ * forever, however.
*/
snprintf(buf, sizeof(buf),
SYSTEMQUOTE "\"%s/psql\" -X postgres <%s 2>%s" SYSTEMQUOTE,
fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir);
/*
- * If we get here, the postmaster is probably wedged somewhere
- * in startup. Try to kill it ungracefully rather than leaving
- * a stuck postmaster that might interfere with subsequent test
+ * If we get here, the postmaster is probably wedged somewhere in
+ * startup. Try to kill it ungracefully rather than leaving a
+ * stuck postmaster that might interfere with subsequent test
* attempts.
*/
#ifndef WIN32
* pre-existing database.
*/
header(_("dropping database \"%s\""), dbname);
- psql_command("postgres","DROP DATABASE IF EXISTS \"%s\"", dbname);
+ psql_command("postgres", "DROP DATABASE IF EXISTS \"%s\"", dbname);
}
/*
* Create the test database
*
- * We use template0 so that any installation-local cruft in template1
- * will not mess up the tests.
+ * We use template0 so that any installation-local cruft in template1 will
+ * not mess up the tests.
*/
header(_("creating database \"%s\""), dbname);
if (encoding)
psql_command("postgres",
- "CREATE DATABASE \"%s\" TEMPLATE=template0 ENCODING='%s'",
+ "CREATE DATABASE \"%s\" TEMPLATE=template0 ENCODING='%s'",
dbname, encoding);
- else /* use installation default */
+ else
+ /* use installation default */
psql_command("postgres",
"CREATE DATABASE \"%s\" TEMPLATE=template0",
dbname);
"ALTER DATABASE \"%s\" SET lc_monetary TO 'C';"
"ALTER DATABASE \"%s\" SET lc_numeric TO 'C';"
"ALTER DATABASE \"%s\" SET lc_time TO 'C';"
- "ALTER DATABASE \"%s\" SET timezone_abbreviations TO 'Default';",
+ "ALTER DATABASE \"%s\" SET timezone_abbreviations TO 'Default';",
dbname, dbname, dbname, dbname, dbname);
/*
snprintf(buf, sizeof(buf),
_(" All %d tests passed. "),
success_count);
- else if (fail_count == 0) /* fail_count=0, fail_ignore_count>0 */
+ else if (fail_count == 0) /* fail_count=0, fail_ignore_count>0 */
snprintf(buf, sizeof(buf),
_(" %d of %d tests passed, %d failed test(s) ignored. "),
success_count,
success_count + fail_ignore_count,
fail_ignore_count);
- else if (fail_ignore_count == 0) /* fail_count>0 && fail_ignore_count=0 */
+ else if (fail_ignore_count == 0) /* fail_count>0 && fail_ignore_count=0 */
snprintf(buf, sizeof(buf),
_(" %d of %d tests failed. "),
fail_count,
- success_count+fail_count);
- else /* fail_count>0 && fail_ignore_count>0 */
+ success_count + fail_count);
+ else
+ /* fail_count>0 && fail_ignore_count>0 */
snprintf(buf, sizeof(buf),
_(" %d of %d tests failed, %d of these failures ignored. "),
- fail_count+fail_ignore_count,
- success_count + fail_count+fail_ignore_count,
+ fail_count + fail_ignore_count,
+ success_count + fail_count + fail_ignore_count,
fail_ignore_count);
putchar('\n');
* Portions Copyright (c) 1996-2006, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.45 2006/09/16 20:14:34 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.46 2006/10/04 00:30:14 momjian Exp $
*
*-------------------------------------------------------------------------
*/
"Australia/Perth"
}, /* (GMT+08:00) Perth */
/* {"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
- * * * * ""}, Could not find a match for this one. Excluded for now. *//* (
+ * * * * * ""}, Could not find a match for this one. Excluded for now. *//* (
* G MT+01:00) West Central Africa */
{
"W. Europe Standard Time", "W. Europe Daylight Time",
*/
#define MAX_TZDIR_DEPTH 10
-struct pg_tzenum {
- int baselen;
- int depth;
- DIR *dirdesc[MAX_TZDIR_DEPTH];
- char *dirname[MAX_TZDIR_DEPTH];
- struct pg_tz tz;
+struct pg_tzenum
+{
+ int baselen;
+ int depth;
+ DIR *dirdesc[MAX_TZDIR_DEPTH];
+ char *dirname[MAX_TZDIR_DEPTH];
+ struct pg_tz tz;
};
+
/* typedef pg_tzenum is declared in pgtime.h */
pg_tzenum *
-pg_tzenumerate_start(void)
+pg_tzenumerate_start(void)
{
- pg_tzenum *ret = (pg_tzenum *) palloc0(sizeof(pg_tzenum));
- char *startdir = pstrdup(pg_TZDIR());
+ pg_tzenum *ret = (pg_tzenum *) palloc0(sizeof(pg_tzenum));
+ char *startdir = pstrdup(pg_TZDIR());
ret->baselen = strlen(startdir) + 1;
ret->depth = 0;
ret->dirname[0] = startdir;
ret->dirdesc[0] = AllocateDir(startdir);
- if (!ret->dirdesc[0])
+ if (!ret->dirdesc[0])
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m", startdir)));
while (dir->depth >= 0)
{
struct dirent *direntry;
- char fullname[MAXPGPATH];
+ char fullname[MAXPGPATH];
struct stat statbuf;
direntry = ReadDir(dir->dirdesc[dir->depth], dir->dirname[dir->depth]);
if (S_ISDIR(statbuf.st_mode))
{
/* Step into the subdirectory */
- if (dir->depth >= MAX_TZDIR_DEPTH-1)
+ if (dir->depth >= MAX_TZDIR_DEPTH - 1)
ereport(ERROR,
(errmsg("timezone directory stack overflow")));
dir->depth++;
dir->dirname[dir->depth] = pstrdup(fullname);
dir->dirdesc[dir->depth] = AllocateDir(fullname);
- if (!dir->dirdesc[dir->depth])
+ if (!dir->dirdesc[dir->depth])
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not open directory \"%s\": %m",
}
/*
- * Load this timezone using tzload() not pg_tzset(),
- * so we don't fill the cache
+ * Load this timezone using tzload() not pg_tzset(), so we don't fill
+ * the cache
*/
if (tzload(fullname + dir->baselen, &dir->tz.state) != 0)
{