+ while (node && *level < wrdlen)
+ {
StopLow = node->data;
- StopHigh = node->data+node->length;
- while (StopLow < StopHigh) {
+ StopHigh = node->data + node->length;
+ while (StopLow < StopHigh)
+ {
StopMiddle = StopLow + ((StopHigh - StopLow) >> 1);
- symbol = GETWCHAR(word,wrdlen,*level,type);
- if ( StopMiddle->val == symbol ) {
+ symbol = GETWCHAR(word, wrdlen, *level, type);
+ if (StopMiddle->val == symbol)
+ {
(*level)++;
- if ( StopMiddle->naff )
+ if (StopMiddle->naff)
return StopMiddle;
- node=StopMiddle->node;
+ node = StopMiddle->node;
break;
- } else if ( StopMiddle->val < symbol ) {
+ }
+ else if (StopMiddle->val < symbol)
StopLow = StopMiddle + 1;
- } else {
+ else
StopHigh = StopMiddle;
- }
}
- if ( StopLow >= StopHigh )
- break;
+ if (StopLow >= StopHigh)
+ break;
}
return NULL;
}
static char *
-CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *newword) {
+CheckAffix(const char *word, size_t len, AFFIX * Affix, char flagflags, char *newword)
+{
- if ( flagflags & FF_COMPOUNDONLYAFX ) {
- if ( (Affix->flagflags & FF_COMPOUNDONLYAFX) == 0 )
+ if (flagflags & FF_COMPOUNDONLYAFX)
+ {
+ if ((Affix->flagflags & FF_COMPOUNDONLYAFX) == 0)
return NULL;
- } else {
- if ( Affix->flagflags & FF_COMPOUNDONLYAFX )
+ }
+ else
+ {
+ if (Affix->flagflags & FF_COMPOUNDONLYAFX)
return NULL;
- }
+ }
- if ( Affix->type==FF_SUFFIX ) {
+ if (Affix->type == FF_SUFFIX)
+ {
strcpy(newword, word);
strcpy(newword + len - Affix->replen, Affix->find);
- } else {
+ }
+ else
+ {
strcpy(newword, Affix->find);
strcat(newword, word + Affix->replen);
}
- if ( Affix->issimple ) {
- return newword;
- } else if ( Affix->isregis ) {
- if (Affix->compile) {
- RS_compile(&(Affix->reg.regis), (Affix->type==FF_SUFFIX) ? 1 : 0, Affix->mask);
- Affix->compile = 0;
- }
- if ( RS_execute(&(Affix->reg.regis), newword, -1) )
- return newword;
- } else {
- regmatch_t subs[2]; /* workaround for apache&linux */
+ if (Affix->issimple)
+ return newword;
+ else if (Affix->isregis)
+ {
+ if (Affix->compile)
+ {
+ RS_compile(&(Affix->reg.regis), (Affix->type == FF_SUFFIX) ? 1 : 0, Affix->mask);
+ Affix->compile = 0;
+ }
+ if (RS_execute(&(Affix->reg.regis), newword, -1))
+ return newword;
+ }
+ else
+ {
+ regmatch_t subs[2]; /* workaround for apache&linux */
int err;
pg_wchar *data;
size_t data_len;
- int dat_len;
+ int dat_len;
+
if (Affix->compile)
{
- int wmasklen,masklen = strlen(Affix->mask);
- pg_wchar *mask;
+ int wmasklen,
+ masklen = strlen(Affix->mask);
+ pg_wchar *mask;
+
mask = (pg_wchar *) palloc((masklen + 1) * sizeof(pg_wchar));
- wmasklen = pg_mb2wchar_with_len( Affix->mask, mask, masklen);
-
+ wmasklen = pg_mb2wchar_with_len(Affix->mask, mask, masklen);
+
err = pg_regcomp(&(Affix->reg.regex), mask, wmasklen, REG_EXTENDED | REG_ICASE | REG_NOSUB);
pfree(mask);
if (err)
{
- /* regerror(err, &(Affix->reg.regex), regerrstr, ERRSTRSIZE); */
+ /*
+ * regerror(err, &(Affix->reg.regex), regerrstr,
+ * ERRSTRSIZE);
+ */
pg_regfree(&(Affix->reg.regex));
return (NULL);
}
data = (pg_wchar *) palloc((dat_len + 1) * sizeof(pg_wchar));
data_len = pg_mb2wchar_with_len(newword, data, dat_len);
- if (!(err = pg_regexec(&(Affix->reg.regex), data,dat_len,NULL, 1, subs, 0))) {
- pfree(data);
- return newword;
+ if (!(err = pg_regexec(&(Affix->reg.regex), data, dat_len, NULL, 1, subs, 0)))
+ {
+ pfree(data);
+ return newword;
}
pfree(data);
}
}
-static char **
-NormalizeSubWord(IspellDict * Conf, char *word, char flag) {
- AffixNodeData *suffix=NULL, *prefix=NULL;
- int slevel=0, plevel=0;
- int wrdlen = strlen(word), swrdlen;
+static char **
+NormalizeSubWord(IspellDict * Conf, char *word, char flag)
+{
+ AffixNodeData *suffix = NULL,
+ *prefix = NULL;
+ int slevel = 0,
+ plevel = 0;
+ int wrdlen = strlen(word),
+ swrdlen;
char **forms;
char **cur;
char newword[2 * MAXNORMLEN] = "";
char pnewword[2 * MAXNORMLEN] = "";
- AffixNode *snode = Conf->Suffix, *pnode;
- int i,j;
+ AffixNode *snode = Conf->Suffix,
+ *pnode;
+ int i,
+ j;
- if (wrdlen > MAXNORMLEN) return NULL;
- strlower(word);
+ if (wrdlen > MAXNORMLEN)
+ return NULL;
+ strlower(word);
cur = forms = (char **) palloc(MAX_NORM * sizeof(char *));
*cur = NULL;
/* Check that the word itself is normal form */
- if (FindWord(Conf, word, 0, flag & FF_COMPOUNDWORD)) {
+ if (FindWord(Conf, word, 0, flag & FF_COMPOUNDWORD))
+ {
*cur = pstrdup(word);
cur++;
*cur = NULL;
}
- /* Find all other NORMAL forms of the 'word' (check only prefix)*/
- pnode=Conf->Prefix;
- plevel=0;
- while(pnode) {
- prefix=FinfAffixes(pnode, word, wrdlen, &plevel,FF_PREFIX);
- if (!prefix) break;
- if ( CheckAffix(word,wrdlen,prefix->aff[j], flag, newword) ) {
+ /* Find all other NORMAL forms of the 'word' (check only prefix) */
+ pnode = Conf->Prefix;
+ plevel = 0;
+ while (pnode)
+ {
+ prefix = FinfAffixes(pnode, word, wrdlen, &plevel, FF_PREFIX);
+ if (!prefix)
+ break;
+ for (j = 0; j < prefix->naff; j++)
+ {
+ if (CheckAffix(word, wrdlen, prefix->aff[j], flag, newword))
+ {
/* prefix success */
- if ( FindWord(Conf, newword, prefix->aff[j]->flag, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) {
+ if (FindWord(Conf, newword, prefix->aff[j]->flag, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1))
+ {
/* word search success */
*cur = pstrdup(newword);
cur++;
- *cur=NULL;
+ *cur = NULL;
}
}
}
pnode = prefix->node;
}
-
- /* Find all other NORMAL forms of the 'word' (check suffix and then prefix)*/
- while( snode ) {
+
+ /*
+ * Find all other NORMAL forms of the 'word' (check suffix and then
+ * prefix)
+ */
+ while (snode)
+ {
/* find possible suffix */
suffix = FinfAffixes(snode, word, wrdlen, &slevel, FF_SUFFIX);
- if (!suffix) break;
+ if (!suffix)
+ break;
/* foreach suffix check affix */
- for(i=0;i<suffix->naff;i++) {
- if ( CheckAffix(word, wrdlen, suffix->aff[i], flag, newword) ) {
+ for (i = 0; i < suffix->naff; i++)
+ {
+ if (CheckAffix(word, wrdlen, suffix->aff[i], flag, newword))
+ {
/* suffix success */
- if ( FindWord(Conf, newword, suffix->aff[i]->flag, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) {
+ if (FindWord(Conf, newword, suffix->aff[i]->flag, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1))
+ {
/* word search success */
*cur = pstrdup(newword);
cur++;
- *cur=NULL;
+ *cur = NULL;
}
/* now we will look changed word with prefixes */
- pnode=Conf->Prefix;
- plevel=0;
- swrdlen=strlen(newword);
- while(pnode) {
- prefix=FinfAffixes(pnode, newword, swrdlen, &plevel,FF_PREFIX);
- if (!prefix) break;
- if ( CheckAffix(newword,swrdlen,prefix->aff[j], flag, pnewword) ) {
+ pnode = Conf->Prefix;
+ plevel = 0;
+ swrdlen = strlen(newword);
+ while (pnode)
+ {
+ prefix = FinfAffixes(pnode, newword, swrdlen, &plevel, FF_PREFIX);
+ if (!prefix)
+ break;
+ for (j = 0; j < prefix->naff; j++)
+ {
+ if (CheckAffix(newword, swrdlen, prefix->aff[j], flag, pnewword))
+ {
/* prefix success */
- int ff=( prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT ) ?
- 0 : prefix->aff[j]->flag;
- if ( FindWord(Conf, pnewword, ff, flag&FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM-1) ) {
+ int ff = (prefix->aff[j]->flagflags & suffix->aff[i]->flagflags & FF_CROSSPRODUCT) ?
+ 0 : prefix->aff[j]->flag;
+
+ if (FindWord(Conf, pnewword, ff, flag & FF_COMPOUNDWORD) && (cur - forms) < (MAX_NORM - 1))
+ {
/* word search success */
*cur = pstrdup(pnewword);
cur++;
- *cur=NULL;
+ *cur = NULL;
}
}
}
pnode = prefix->node;
- }
+ }
}
}
- snode=suffix->node;
+ snode = suffix->node;
}
- if (cur == forms) {
+ if (cur == forms)
+ {
pfree(forms);
return (NULL);
}
return (forms);
}
-typedef struct SplitVar {
- int nstem;
- char **stem;
- struct SplitVar *next;
-} SplitVar;
+typedef struct SplitVar
+{
+ int nstem;
+ char **stem;
+ struct SplitVar *next;
+} SplitVar;
-static int
-CheckCompoundAffixes(CMPDAffix **ptr, char *word, int len) {
- while( (*ptr)->affix ) {
- if ( len > (*ptr)->len && strncmp((*ptr)->affix, word, (*ptr)->len)==0 ) {
+static int
+CheckCompoundAffixes(CMPDAffix ** ptr, char *word, int len)
+{
+ while ((*ptr)->affix)
+ {
+ if (len > (*ptr)->len && strncmp((*ptr)->affix, word, (*ptr)->len) == 0)
+ {
len = (*ptr)->len;
(*ptr)++;
return len;
return 0;
}
-static SplitVar*
-CopyVar(SplitVar *s, int makedup) {
- SplitVar *v = (SplitVar*)palloc(sizeof(SplitVar));
+static SplitVar *
+CopyVar(SplitVar * s, int makedup)
+{
+ SplitVar *v = (SplitVar *) palloc(sizeof(SplitVar));
+
+ v->stem = (char **) palloc(sizeof(char *) * (MAX_NORM));
+ v->next = NULL;
+ if (s)
+ {
+ int i;
- v->stem=(char**)palloc( sizeof(char*) * (MAX_NORM) );
- v->next=NULL;
- if ( s ) {
- int i;
v->nstem = s->nstem;
- for(i=0;i<s->nstem;i++)
- v->stem[i] = (makedup) ? pstrdup( s->stem[i] ) : s->stem[i];
- } else {
- v->nstem=0;
+ for (i = 0; i < s->nstem; i++)
+ v->stem[i] = (makedup) ? pstrdup(s->stem[i]) : s->stem[i];
}
+ else
+ v->nstem = 0;
return v;
}
-static SplitVar*
-SplitToVariants( IspellDict * Conf, SPNode *snode, SplitVar * orig, char *word, int wordlen, int startpos, int minpos ) {
- SplitVar *var=NULL;
- SPNodeData *StopLow, *StopHigh, *StopMiddle = NULL;
- SPNode *node = (snode) ? snode : Conf->Dictionary;
- int level=(snode) ? minpos : startpos; /* recursive minpos==level*/
- int lenaff;
- CMPDAffix *caff;
- char *notprobed;
+static SplitVar *
+SplitToVariants(IspellDict * Conf, SPNode * snode, SplitVar * orig, char *word, int wordlen, int startpos, int minpos)
+{
+ SplitVar *var = NULL;
+ SPNodeData *StopLow,
+ *StopHigh,
+ *StopMiddle = NULL;
+ SPNode *node = (snode) ? snode : Conf->Dictionary;
+ int level = (snode) ? minpos : startpos; /* recursive
+ * minpos==level */
+ int lenaff;
+ CMPDAffix *caff;
+ char *notprobed;
notprobed = (char *) palloc(wordlen);
- memset(notprobed,1,wordlen);
- var = CopyVar(orig,1);
+ memset(notprobed, 1, wordlen);
+ var = CopyVar(orig, 1);
- while( node && level<wordlen ) {
+ while (node && level < wordlen)
+ {
StopLow = node->data;
- StopHigh = node->data+node->length;
- while (StopLow < StopHigh) {
+ StopHigh = node->data + node->length;
+ while (StopLow < StopHigh)
+ {
StopMiddle = StopLow + ((StopHigh - StopLow) >> 1);
- if ( StopMiddle->val == ((uint8*)(word))[level] ) {
+ if (StopMiddle->val == ((uint8 *) (word))[level])
break;
- } else if ( StopMiddle->val < ((uint8*)(word))[level] ) {
+ else if (StopMiddle->val < ((uint8 *) (word))[level])
StopLow = StopMiddle + 1;
- } else {
+ else
StopHigh = StopMiddle;
- }
}
- if ( StopLow >= StopHigh )
+ if (StopLow >= StopHigh)
break;
/* find word with epenthetic */
caff = Conf->CompoundAffix;
- while ( level>startpos && (lenaff=CheckCompoundAffixes( &caff, word + level, wordlen - level ))>0 ) {
- /* there is one of compound suffixes, so check word for existings */
- char buf[MAXNORMLEN];
- char **subres;
-
- lenaff=level-startpos+lenaff;
-
- if ( !notprobed[startpos+lenaff-1] )
+ while (level > startpos && (lenaff = CheckCompoundAffixes(&caff, word + level, wordlen - level)) > 0)
+ {
+ /*
+ * there is one of compound suffixes, so check word for
+ * existings
+ */
+ char buf[MAXNORMLEN];
+ char **subres;
+
+ lenaff = level - startpos + lenaff;
+
+ if (!notprobed[startpos + lenaff - 1])
continue;
-
- if ( level+lenaff-1 <= minpos )
+
+ if (level + lenaff - 1 <= minpos)
continue;
- memcpy(buf, word+startpos, lenaff);
- buf[lenaff]='\0';
+ memcpy(buf, word + startpos, lenaff);
+ buf[lenaff] = '\0';
subres = NormalizeSubWord(Conf, buf, FF_COMPOUNDWORD | FF_COMPOUNDONLYAFX);
- if ( subres ) {
+ if (subres)
+ {
/* Yes, it was a word from dictionary */
- SplitVar *new=CopyVar(var,0);
- SplitVar *ptr=var;
- char **sptr=subres;
-
- notprobed[startpos+lenaff-1]=0;
-
- while(*sptr) {
- new->stem[ new->nstem ] = *sptr;
+ SplitVar *new = CopyVar(var, 0);
+ SplitVar *ptr = var;
+ char **sptr = subres;
+
+ notprobed[startpos + lenaff - 1] = 0;
+
+ while (*sptr)
+ {
+ new->stem[new->nstem] = *sptr;
new->nstem++;
sptr++;
}
pfree(subres);
- while( ptr->next )
+ while (ptr->next)
ptr = ptr->next;
- ptr->next = SplitToVariants(Conf, NULL, new, word, wordlen, startpos+lenaff, startpos+lenaff);
-
+ ptr->next = SplitToVariants(Conf, NULL, new, word, wordlen, startpos + lenaff, startpos + lenaff);
+
pfree(new->stem);
pfree(new);
}
}
/* find infinitive */
- if ( StopMiddle->isword && StopMiddle->compoundallow && notprobed[level] ) {
- /* ok, we found full compoundallowed word*/
- if ( level>minpos ) {
+ if (StopMiddle->isword && StopMiddle->compoundallow && notprobed[level])
+ {
+ /* ok, we found full compoundallowed word */
+ if (level > minpos)
+ {
/* and its length more than minimal */
- if ( wordlen==level+1 ) {
+ if (wordlen == level + 1)
+ {
/* well, it was last word */
- var->stem[ var->nstem ] = strnduplicate(word + startpos, wordlen - startpos);
+ var->stem[var->nstem] = strnduplicate(word + startpos, wordlen - startpos);
var->nstem++;
pfree(notprobed);
return var;
- } else {
+ }
+ else
+ {
/* then we will search more big word at the same point */
- SplitVar *ptr=var;
- while( ptr->next )
+ SplitVar *ptr = var;
+
+ while (ptr->next)
ptr = ptr->next;
- ptr->next=SplitToVariants(Conf, node, var, word, wordlen, startpos, level);
+ ptr->next = SplitToVariants(Conf, node, var, word, wordlen, startpos, level);
/* we can find next word */
level++;
- var->stem[ var->nstem ] = strnduplicate(word + startpos, level - startpos);
+ var->stem[var->nstem] = strnduplicate(word + startpos, level - startpos);
var->nstem++;
node = Conf->Dictionary;
- startpos=level;
+ startpos = level;
continue;
}
}
}
level++;
- node=StopMiddle->node;
+ node = StopMiddle->node;
}
- var->stem[ var->nstem ] = strnduplicate(word + startpos, wordlen - startpos);
+ var->stem[var->nstem] = strnduplicate(word + startpos, wordlen - startpos);
var->nstem++;
pfree(notprobed);
return var;
-}
-
-char **
-NINormalizeWord(IspellDict * Conf, char *word) {
- char **res= NormalizeSubWord(Conf, word, 0);
-
- if ( Conf->compoundcontrol != '\t' ) {
- int wordlen=strlen(word);
- SplitVar *ptr, *var = SplitToVariants(Conf,NULL,NULL, word, wordlen, 0, -1);
- char **cur=res;
- int i;
-
- while(var) {
- if ( var->nstem > 1 ) {
- char **subres = NormalizeSubWord(Conf, var->stem[ var->nstem-1 ], FF_COMPOUNDWORD);
- if ( subres ) {
- char **ptr=subres;
-
- if ( cur ) {
- while(*cur)
+}
+
+char **
+NINormalizeWord(IspellDict * Conf, char *word)
+{
+ char **res = NormalizeSubWord(Conf, word, 0);
+
+ if (Conf->compoundcontrol != '\t')
+ {
+ int wordlen = strlen(word);
+ SplitVar *ptr,
+ *var = SplitToVariants(Conf, NULL, NULL, word, wordlen, 0, -1);
+ char **cur = res;
+ int i;
+
+ while (var)
+ {
+ if (var->nstem > 1)
+ {
+ char **subres = NormalizeSubWord(Conf, var->stem[var->nstem - 1], FF_COMPOUNDWORD);
+
+ if (subres)
+ {
+ char **ptr = subres;
+
+ if (cur)
+ {
+ while (*cur)
cur++;
- } else {
- res=cur=(char **) palloc(MAX_NORM * sizeof(char *));
}
-
- for(i=0;i<var->nstem-1;i++) {
- *cur=var->stem[ i ];
+ else
+ res = cur = (char **) palloc(MAX_NORM * sizeof(char *));
+
+ for (i = 0; i < var->nstem - 1; i++)
+ {
+ *cur = var->stem[i];
cur++;
}
- while(*ptr) {
- *cur=*ptr;
- cur++; ptr++;
+ while (*ptr)
+ {
+ *cur = *ptr;
+ cur++;
+ ptr++;
}
- *cur=NULL;
+ *cur = NULL;
pfree(subres);
- var->stem[ 0 ] = NULL;
+ var->stem[0] = NULL;
}
}
-
- for(i=0;i<var->nstem && var->stem[ i ];i++)
- pfree( var->stem[i] );
+
+ for (i = 0; i < var->nstem && var->stem[i]; i++)
+ pfree(var->stem[i]);
ptr = var->next;
pfree(var->stem);
- pfree(var);
- var=ptr;
+ pfree(var);
+ var = ptr;
}
}
return res;
}
-static void freeSPNode(SPNode *node) {
+static void
+freeSPNode(SPNode * node)
+{
SPNodeData *data;
- if (!node) return;
- data=node->data;
- while( node->length ) {
+ if (!node)
+ return;
+ data = node->data;
+ while (node->length)
+ {
freeSPNode(data->node);
data++;
node->length--;
}
free(node);
}
-
-static void freeANode(AffixNode *node) {
+
+static void
+freeANode(AffixNode * node)
+{
AffixNodeData *data;
- if (!node) return;
- data=node->data;
- while( node->length ) {
+ if (!node)
+ return;
+ data = node->data;
+ while (node->length)
+ {
freeANode(data->node);
if (data->naff)
- free(data->aff);
+ free(data->aff);
data++;
node->length--;
}
free(node);
}
-
+
void
NIFree(IspellDict * Conf)
{
int i;
AFFIX *Affix = (AFFIX *) Conf->Affix;
- char** aff = Conf->AffixData;
+ char **aff = Conf->AffixData;
- if ( aff ) {
- while(*aff) {
+ if (aff)
+ {
+ while (*aff)
+ {
free(*aff);
aff++;
}
free(Conf->AffixData);
}
-
+
for (i = 0; i < Conf->naffixes; i++)
{
- if (Affix[i].compile == 0) {
- if ( Affix[i].isregis )
- RS_free(&(Affix[i].reg.regis));
- else
+ if (Affix[i].compile == 0)
+ {
+ if (Affix[i].isregis)
+ RS_free(&(Affix[i].reg.regis));
+ else
pg_regfree(&(Affix[i].reg.regex));
}
}
- if (Conf->Spell) {
+ if (Conf->Spell)
+ {
for (i = 0; i < Conf->nspell; i++)
free(Conf->Spell[i].word);
free(Conf->Spell);
}
- if (Conf->Affix) free(Conf->Affix);
- if ( Conf->CompoundAffix ) free(Conf->CompoundAffix);
+ if (Conf->Affix)
+ free(Conf->Affix);
+ if (Conf->CompoundAffix)
+ free(Conf->CompoundAffix);
freeSPNode(Conf->Dictionary);
freeANode(Conf->Suffix);
freeANode(Conf->Prefix);
struct SPNode;
-typedef struct {
- uint32
- val:8,
- isword:1,
- compoundallow:1,
- affix:22;
- struct SPNode *node;
-} SPNodeData;
-
-typedef struct SPNode {
- uint32 length;
- SPNodeData data[1];
-} SPNode;
+typedef struct
+{
+ uint32
+ val:8,
+ isword:1,
+ compoundallow:1,
+ affix:22;
+ struct SPNode *node;
+} SPNodeData;
+
+typedef struct SPNode
+{
+ uint32 length;
+ SPNodeData data[1];
+} SPNode;
#define SPNHRDSZ (sizeof(uint32))
typedef struct spell_struct
{
char *word;
- union {
+ union
+ {
char flag[16];
- struct {
- int affix;
- int len;
- } d;
- } p;
+ struct
+ {
+ int affix;
+ int len;
+ } d;
+ } p;
} SPELL;
typedef struct aff_struct
{
- uint32
- flag:8,
- type:2,
- compile:1,
- flagflags:3,
- issimple:1,
- isregis:1,
- unused:1,
- replen:16;
- char mask[32];
- char find[16];
- char repl[16];
- union {
- regex_t regex;
- Regis regis;
- } reg;
+ uint32
+ flag:8,
+ type:2,
+ compile:1,
+ flagflags:3,
+ issimple:1,
+ isregis:1,
+ unused:1,
+ replen:16;
+ char mask[32];
+ char find[16];
+ char repl[16];
+ union
+ {
+ regex_t regex;
+ Regis regis;
+ } reg;
} AFFIX;
-#define FF_CROSSPRODUCT 0x01
-#define FF_COMPOUNDWORD 0x02
-#define FF_COMPOUNDONLYAFX 0x04
-#define FF_SUFFIX 2
-#define FF_PREFIX 1
+#define FF_CROSSPRODUCT 0x01
+#define FF_COMPOUNDWORD 0x02
+#define FF_COMPOUNDONLYAFX 0x04
+#define FF_SUFFIX 2
+#define FF_PREFIX 1
struct AffixNode;
-typedef struct {
+typedef struct
+{
uint32
- val:8,
- naff:24;
- AFFIX **aff;
+ val:8,
+ naff:24;
+ AFFIX **aff;
struct AffixNode *node;
-} AffixNodeData;
+} AffixNodeData;
-typedef struct AffixNode {
- uint32 isvoid:1,
- length:31;
- AffixNodeData data[1];
-} AffixNode;
+typedef struct AffixNode
+{
+ uint32 isvoid:1,
+ length:31;
+ AffixNodeData data[1];
+} AffixNode;
-#define ANHRDSZ (sizeof(uint32))
+#define ANHRDSZ (sizeof(uint32))
-typedef struct {
- char *affix;
- int len;
-} CMPDAffix;
+typedef struct
+{
+ char *affix;
+ int len;
+} CMPDAffix;
typedef struct
{
int maffixes;
int naffixes;
AFFIX *Affix;
- char compoundcontrol;
+ char compoundcontrol;
int nspell;
int mspell;
SPELL *Spell;
- AffixNode *Suffix;
- AffixNode *Prefix;
+ AffixNode *Suffix;
+ AffixNode *Prefix;
- SPNode *Dictionary;
- char **AffixData;
- CMPDAffix *CompoundAffix;
+ SPNode *Dictionary;
+ char **AffixData;
+ CMPDAffix *CompoundAffix;
} IspellDict;
Datum
rexectsq(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
return DirectFunctionCall2(
exectsq,
PG_GETARG_DATUM(1),
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
CHKVAL chkval;
bool result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (!val->size || !query->size)
{
PG_FREE_IF_COPY(val, 0);
Datum
tsquery_in(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
PG_RETURN_POINTER(queryin((char *) PG_GETARG_POINTER(0), pushval_asis, 0));
}
QUERYTYPE *query;
ITEM *res;
int4 len;
- SET_FUNCOID();
+
+ SET_FUNCOID();
str = text2char(in);
PG_FREE_IF_COPY(in, 1);
{
text *name = PG_GETARG_TEXT_P(0);
Datum res;
- SET_FUNCOID();
+
+ SET_FUNCOID();
res = DirectFunctionCall2(to_tsquery,
- Int32GetDatum(name2id_cfg(name)),
- PG_GETARG_DATUM(1));
+ Int32GetDatum(name2id_cfg(name)),
+ PG_GETARG_DATUM(1));
PG_FREE_IF_COPY(name, 0);
PG_RETURN_DATUM(res);
static int
compareSNMapEntry(const void *a, const void *b)
{
- if ( ((SNMapEntry *) a)->nsp < ((SNMapEntry *) b)->nsp )
+ if (((SNMapEntry *) a)->nsp < ((SNMapEntry *) b)->nsp)
return -1;
- else if ( ((SNMapEntry *) a)->nsp > ((SNMapEntry *) b)->nsp )
+ else if (((SNMapEntry *) a)->nsp > ((SNMapEntry *) b)->nsp)
return 1;
- else
+ else
return strcmp(((SNMapEntry *) a)->key, ((SNMapEntry *) b)->key);
}
j;
text *ptr;
text *prsname = NULL;
- char *nsp=get_namespace(TSNSP_FunctionOid);
- char buf[1024];
+ char *nsp = get_namespace(TSNSP_FunctionOid);
+ char buf[1024];
MemoryContext oldcontext;
- void *plan;
+ void *plan;
arg[0] = OIDOID;
arg[1] = OIDOID;
SPI_connect();
sprintf(buf, "select prs_name from %s.pg_ts_cfg where oid = $1", nsp);
- plan= SPI_prepare(buf, 1, arg);
+ plan = SPI_prepare(buf, 1, arg);
if (!plan)
ts_error(ERROR, "SPI_prepare() failed");
arg[0] = TEXTOID;
sprintf(buf, "select lt.tokid, map.dict_name from %s.pg_ts_cfgmap as map, %s.pg_ts_cfg as cfg, %s.token_type( $1 ) as lt where lt.alias = map.tok_alias and map.ts_name = cfg.ts_name and cfg.oid= $2 order by lt.tokid desc;", nsp, nsp, nsp);
- plan= SPI_prepare(buf, 2, arg);
+ plan = SPI_prepare(buf, 2, arg);
if (!plan)
ts_error(ERROR, "SPI_prepare() failed");
cfg->map[lexid].len = ARRNELEMS(a);
cfg->map[lexid].dict_id = (Datum *) malloc(sizeof(Datum) * cfg->map[lexid].len);
if (!cfg->map[lexid].dict_id)
- ts_error(ERROR, "No memory");
+ ts_error(ERROR, "No memory");
memset(cfg->map[lexid].dict_id, 0, sizeof(Datum) * cfg->map[lexid].len);
ptr = (text *) ARR_DATA_PTR(a);
Datum pars[1];
int stat;
Oid id = findSNMap_t(&(CList.name2id_map), name);
- void *plan;
- char *nsp;
- char buf[1024];
+ void *plan;
+ char *nsp;
+ char buf[1024];
arg[0] = TEXTOID;
pars[0] = PointerGetDatum(name);
if (id)
return id;
- nsp=get_namespace(TSNSP_FunctionOid);
+ nsp = get_namespace(TSNSP_FunctionOid);
SPI_connect();
- sprintf(buf, "select oid from %s.pg_ts_cfg where ts_name = $1", nsp);
- plan= SPI_prepare(buf, 1, arg);
+ sprintf(buf, "select oid from %s.pg_ts_cfg where ts_name = $1", nsp);
+ plan = SPI_prepare(buf, 1, arg);
if (!plan)
/* internal error */
elog(ERROR, "SPI_prepare() failed");
PointerGetDatum(&lenlemm)))) != 0)
{
- if (lenlemm >= MAXSTRLEN) {
+ if (lenlemm >= MAXSTRLEN)
+ {
#ifdef IGNORE_LONGLEXEME
ereport(NOTICE,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
continue;
-#else
+#else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
PointerGetDatum(&lenlemm)))) != 0)
{
- if (lenlemm >= MAXSTRLEN) {
+ if (lenlemm >= MAXSTRLEN)
+ {
#ifdef IGNORE_LONGLEXEME
ereport(NOTICE,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
continue;
-#else
+#else
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("word is too long")));
ptr += prs->stopsellen;
}
}
- } else
-
- if (!wrd->repeated)
+ }
+ else if (!wrd->repeated)
pfree(wrd->word);
wrd++;
Datum pars[1];
bool isnull;
int stat;
- char buf[1024];
- char *nsp;
- void *plan;
+ char buf[1024];
+ char *nsp;
+ void *plan;
if (current_cfg_id > 0)
return current_cfg_id;
- nsp=get_namespace(TSNSP_FunctionOid);
+ nsp = get_namespace(TSNSP_FunctionOid);
SPI_connect();
- sprintf(buf, "select oid from %s.pg_ts_cfg where locale = $1 ", nsp);
+ sprintf(buf, "select oid from %s.pg_ts_cfg where locale = $1 ", nsp);
pfree(nsp);
plan = SPI_prepare(buf, 1, arg);
if (!plan)
Datum
set_curcfg(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
findcfg(PG_GETARG_OID(0));
current_cfg_id = PG_GETARG_OID(0);
PG_RETURN_VOID();
set_curcfg_byname(PG_FUNCTION_ARGS)
{
text *name = PG_GETARG_TEXT_P(0);
- SET_FUNCOID();
+
+ SET_FUNCOID();
DirectFunctionCall1(
set_curcfg,
ObjectIdGetDatum(name2id_cfg(name))
Datum
show_curcfg(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
PG_RETURN_OID(get_currcfg());
}
Datum
reset_tsearch(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
ts_error(NOTICE, "TSearch cache cleaned");
PG_RETURN_VOID();
}
-
tsstat_in(PG_FUNCTION_ARGS)
{
tsstat *stat = palloc(STATHDRSIZE);
-
+
stat->len = STATHDRSIZE;
stat->size = 0;
stat->weight = 0;
}
static int
-check_weight(tsvector *txt, WordEntry *wptr, int8 weight) {
- int len = POSDATALEN(txt, wptr);
- int num=0;
- WordEntryPos *ptr = POSDATAPTR(txt, wptr);
+check_weight(tsvector * txt, WordEntry * wptr, int8 weight)
+{
+ int len = POSDATALEN(txt, wptr);
+ int num = 0;
+ WordEntryPos *ptr = POSDATAPTR(txt, wptr);
- while (len--) {
+ while (len--)
+ {
if (weight & (1 << ptr->weight))
num++;
ptr++;
}
nptr = STATPTR(newstat) + (StopLow - STATPTR(stat));
memcpy(STATPTR(newstat), STATPTR(stat), sizeof(StatEntry) * (StopLow - STATPTR(stat)));
- if ( (*ptr)->haspos ) {
- nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
- } else
+ if ((*ptr)->haspos)
+ nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
+ else
nptr->nentry = 1;
nptr->ndoc = 1;
nptr->len = (*ptr)->len;
}
else
{
- if ( (*ptr)->haspos ) {
- nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
- } else
+ if ((*ptr)->haspos)
+ nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
+ else
nptr->nentry = 1;
nptr->ndoc = 1;
nptr->len = (*ptr)->len;
while (ptr - entry < len)
{
- if ( (*ptr)->haspos ) {
- nptr->nentry = ( stat->weight ) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
- } else
+ if ((*ptr)->haspos)
+ nptr->nentry = (stat->weight) ? check_weight(txt, *ptr, stat->weight) : POSDATALEN(txt, *ptr);
+ else
nptr->nentry = 1;
nptr->ndoc = 1;
nptr->len = (*ptr)->len;
cur = 0;
StatEntry *sptr;
WordEntry *wptr;
- int n=0;
+ int n = 0;
if (stat == NULL || PG_ARGISNULL(0))
{ /* Init in first */
sptr++;
else if (cmp == 0)
{
- if ( stat->weight == 0 ) {
+ if (stat->weight == 0)
+ {
sptr->ndoc++;
sptr->nentry += (wptr->haspos) ? POSDATALEN(txt, wptr) : 1;
- } else if ( wptr->haspos && (n=check_weight(txt, wptr, stat->weight))!=0 ) {
+ }
+ else if (wptr->haspos && (n = check_weight(txt, wptr, stat->weight)) != 0)
+ {
sptr->ndoc++;
sptr->nentry += n;
}
}
else
{
- if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) {
+ if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0)
+ {
if (cur == len)
newentry = SEI_realloc(newentry, &len);
newentry[cur] = wptr;
while (wptr - ARRPTR(txt) < txt->size)
{
- if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) {
+ if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0)
+ {
if (cur == len)
newentry = SEI_realloc(newentry, &len);
newentry[cur] = wptr;
cmp = compareStatWord(sptr, wptr, stat, txt);
if (cmp == 0)
{
- if ( stat->weight == 0 ) {
+ if (stat->weight == 0)
+ {
sptr->ndoc++;
sptr->nentry += (wptr->haspos) ? POSDATALEN(txt, wptr) : 1;
- } else if ( wptr->haspos && (n=check_weight(txt, wptr, stat->weight))!=0 ) {
+ }
+ else if (wptr->haspos && (n = check_weight(txt, wptr, stat->weight)) != 0)
+ {
sptr->ndoc++;
sptr->nentry += n;
}
if (StopLow >= StopHigh)
{ /* not found */
- if ( stat->weight == 0 || check_weight(txt, wptr, stat->weight)!=0 ) {
+ if (stat->weight == 0 || check_weight(txt, wptr, stat->weight) != 0)
+ {
if (cur == len)
newentry = SEI_realloc(newentry, &len);
newentry[cur] = wptr;
stat->size = 0;
stat->weight = 0;
- if ( ws ) {
- char *buf;
+ if (ws)
+ {
+ char *buf;
+
buf = VARDATA(ws);
- while( buf - VARDATA(ws) < VARSIZE(ws) - VARHDRSZ ) {
- switch (tolower(*buf)) {
+ while (buf - VARDATA(ws) < VARSIZE(ws) - VARHDRSZ)
+ {
+ switch (tolower(*buf))
+ {
case 'a':
stat->weight |= 1 << 3;
break;
{
tsstat *stat;
text *txt = PG_GETARG_TEXT_P(0);
- text *ws = (PG_NARGS() > 1) ? PG_GETARG_TEXT_P(1) : NULL;
+ text *ws = (PG_NARGS() > 1) ? PG_GETARG_TEXT_P(1) : NULL;
funcctx = SRF_FIRSTCALL_INIT();
SPI_connect();
- stat = ts_stat_sql(txt,ws);
+ stat = ts_stat_sql(txt, ws);
PG_FREE_IF_COPY(txt, 0);
- if (PG_NARGS() > 1 ) PG_FREE_IF_COPY(ws, 1);
+ if (PG_NARGS() > 1)
+ PG_FREE_IF_COPY(ws, 1);
ts_setup_firstcall(funcctx, stat);
SPI_finish();
}
*cur;
int4 i,
buflen = 256;
- SET_FUNCOID();
+
+ SET_FUNCOID();
state.prsbuf = buf;
state.len = 32;
state.word = (char *) palloc(state.len);
if (len > 0)
len = uniqueentry(arr, len, tmpbuf, &buflen);
else
- buflen=0;
+ buflen = 0;
totallen = CALCDATASIZE(len, buflen);
in = (tsvector *) palloc(totallen);
memset(in, 0, totallen);
res->alen *= 2;
res->pos.apos = (uint16 *) repalloc(res->pos.apos, sizeof(uint16) * res->alen);
}
- if ( res->pos.apos[0]==0 || res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos) ) {
+ if (res->pos.apos[0] == 0 || res->pos.apos[res->pos.apos[0]] != LIMITPOS(ptr->pos.pos))
+ {
res->pos.apos[res->pos.apos[0] + 1] = LIMITPOS(ptr->pos.pos);
res->pos.apos[0]++;
}
tsvector *out = NULL;
TSCfgInfo *cfg;
- SET_FUNCOID();
+ SET_FUNCOID();
cfg = findcfg(PG_GETARG_INT32(0));
prs.lenwords = 32;
{
text *cfg = PG_GETARG_TEXT_P(0);
Datum res;
- SET_FUNCOID();
+
+ SET_FUNCOID();
res = DirectFunctionCall3(
- to_tsvector,
- Int32GetDatum(name2id_cfg(cfg)),
- PG_GETARG_DATUM(1),
- (Datum) 0
- );
+ to_tsvector,
+ Int32GetDatum(name2id_cfg(cfg)),
+ PG_GETARG_DATUM(1),
+ (Datum) 0
+ );
PG_FREE_IF_COPY(cfg, 0);
PG_RETURN_DATUM(res);
to_tsvector_current(PG_FUNCTION_ARGS)
{
Datum res;
- SET_FUNCOID();
+
+ SET_FUNCOID();
res = DirectFunctionCall3(
- to_tsvector,
- Int32GetDatum(get_currcfg()),
- PG_GETARG_DATUM(0),
- (Datum) 0
- );
+ to_tsvector,
+ Int32GetDatum(get_currcfg()),
+ PG_GETARG_DATUM(0),
+ (Datum) 0
+ );
PG_RETURN_DATUM(res);
}
Oid funcoid = InvalidOid;
TSCfgInfo *cfg;
- SET_FUNCOID();
+ SET_FUNCOID();
cfg = findcfg(get_currcfg());
if (!CALLED_AS_TRIGGER(fcinfo))
}
static int
-silly_cmp_tsvector(const tsvector *a, const tsvector *b) {
- if ( a->len < b->len )
+silly_cmp_tsvector(const tsvector * a, const tsvector * b)
+{
+ if (a->len < b->len)
return -1;
- else if ( a->len > b->len )
+ else if (a->len > b->len)
return 1;
- else if ( a->size < b->size )
+ else if (a->size < b->size)
return -1;
- else if ( a->size > b->size )
+ else if (a->size > b->size)
return 1;
- else {
- unsigned char *aptr=(unsigned char *)(a->data) + DATAHDRSIZE;
- unsigned char *bptr=(unsigned char *)(b->data) + DATAHDRSIZE;
-
- while( aptr - ( (unsigned char *)(a->data) ) < a->len ) {
- if ( *aptr != *bptr )
- return ( *aptr < *bptr ) ? -1 : 1;
- aptr++; bptr++;
- }
+ else
+ {
+ unsigned char *aptr = (unsigned char *) (a->data) + DATAHDRSIZE;
+ unsigned char *bptr = (unsigned char *) (b->data) + DATAHDRSIZE;
+
+ while (aptr - ((unsigned char *) (a->data)) < a->len)
+ {
+ if (*aptr != *bptr)
+ return (*aptr < *bptr) ? -1 : 1;
+ aptr++;
+ bptr++;
+ }
}
- return 0;
+ return 0;
}
PG_FUNCTION_INFO_V1(tsvector_cmp);
PG_FUNCTION_INFO_V1(tsvector_ne);
PG_FUNCTION_INFO_V1(tsvector_ge);
PG_FUNCTION_INFO_V1(tsvector_gt);
-Datum tsvector_cmp(PG_FUNCTION_ARGS);
-Datum tsvector_lt(PG_FUNCTION_ARGS);
-Datum tsvector_le(PG_FUNCTION_ARGS);
-Datum tsvector_eq(PG_FUNCTION_ARGS);
-Datum tsvector_ne(PG_FUNCTION_ARGS);
-Datum tsvector_ge(PG_FUNCTION_ARGS);
-Datum tsvector_gt(PG_FUNCTION_ARGS);
-
-#define RUNCMP \
-tsvector *a = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));\
-tsvector *b = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));\
+Datum tsvector_cmp(PG_FUNCTION_ARGS);
+Datum tsvector_lt(PG_FUNCTION_ARGS);
+Datum tsvector_le(PG_FUNCTION_ARGS);
+Datum tsvector_eq(PG_FUNCTION_ARGS);
+Datum tsvector_ne(PG_FUNCTION_ARGS);
+Datum tsvector_ge(PG_FUNCTION_ARGS);
+Datum tsvector_gt(PG_FUNCTION_ARGS);
+
+#define RUNCMP \
+tsvector *a = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));\
+tsvector *b = (tsvector *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));\
int res = silly_cmp_tsvector(a,b); \
PG_FREE_IF_COPY(a,0); \
PG_FREE_IF_COPY(b,1); \
Datum
-tsvector_cmp(PG_FUNCTION_ARGS) {
+tsvector_cmp(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_INT32(res);
}
Datum
-tsvector_lt(PG_FUNCTION_ARGS) {
+tsvector_lt(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_BOOL((res < 0) ? true : false);
}
Datum
-tsvector_le(PG_FUNCTION_ARGS) {
+tsvector_le(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_BOOL((res <= 0) ? true : false);
}
Datum
-tsvector_eq(PG_FUNCTION_ARGS) {
+tsvector_eq(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_BOOL((res == 0) ? true : false);
}
Datum
-tsvector_ge(PG_FUNCTION_ARGS) {
+tsvector_ge(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_BOOL((res >= 0) ? true : false);
}
-
+
Datum
-tsvector_gt(PG_FUNCTION_ARGS) {
+tsvector_gt(PG_FUNCTION_ARGS)
+{
RUNCMP
PG_RETURN_BOOL((res > 0) ? true : false);
-}
-
+}
+
Datum
-tsvector_ne(PG_FUNCTION_ARGS) {
- RUNCMP
+tsvector_ne(PG_FUNCTION_ARGS)
+{
+ RUNCMP
PG_RETURN_BOOL((res != 0) ? true : false);
}
-
#ifndef __PARSER_H__
#define __PARSER_H__
-extern char *token;
-extern int tokenlen;
+extern char *token;
+extern int tokenlen;
int tsearch2_yylex(void);
void tsearch2_start_parse_str(char *, int);
void tsearch2_end_parse(void);
bool isnull;
Datum pars[1];
int stat;
- void *plan;
- char buf[1024], *nsp;
+ void *plan;
+ char buf[1024],
+ *nsp;
arg[0] = OIDOID;
pars[0] = ObjectIdGetDatum(id);
memset(prs, 0, sizeof(WParserInfo));
SPI_connect();
- nsp=get_namespace(TSNSP_FunctionOid);
+ nsp = get_namespace(TSNSP_FunctionOid);
sprintf(buf, "select prs_start, prs_nexttoken, prs_end, prs_lextype, prs_headline from %s.pg_ts_parser where oid = $1", nsp);
pfree(nsp);
- plan= SPI_prepare(buf, 1, arg);
+ plan = SPI_prepare(buf, 1, arg);
if (!plan)
ts_error(ERROR, "SPI_prepare() failed");
Datum pars[1];
int stat;
Oid id = findSNMap_t(&(PList.name2id_map), name);
- char buf[1024], *nsp;
- void *plan;
+ char buf[1024],
+ *nsp;
+ void *plan;
arg[0] = TEXTOID;
pars[0] = PointerGetDatum(name);
nsp = get_namespace(TSNSP_FunctionOid);
sprintf(buf, "select oid from %s.pg_ts_parser where prs_name = $1", nsp);
pfree(nsp);
- plan= SPI_prepare(buf, 1, arg);
+ plan = SPI_prepare(buf, 1, arg);
if (!plan)
ts_error(ERROR, "SPI_prepare() failed");
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
funcctx = SRF_FIRSTCALL_INIT();
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
text *name = PG_GETARG_TEXT_P(0);
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
funcctx = SRF_FIRSTCALL_INIT();
Datum
set_curprs(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
findprs(PG_GETARG_OID(0));
current_parser_id = PG_GETARG_OID(0);
PG_RETURN_VOID();
set_curprs_byname(PG_FUNCTION_ARGS)
{
text *name = PG_GETARG_TEXT_P(0);
- SET_FUNCOID();
+
+ SET_FUNCOID();
DirectFunctionCall1(
set_curprs,
ObjectIdGetDatum(name2id_prs(name))
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
text *txt = PG_GETARG_TEXT_P(1);
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
text *name = PG_GETARG_TEXT_P(0);
{
FuncCallContext *funcctx;
Datum result;
- SET_FUNCOID();
+
+ SET_FUNCOID();
if (SRF_IS_FIRSTCALL())
{
text *txt = PG_GETARG_TEXT_P(0);
TSCfgInfo *cfg;
WParserInfo *prsobj;
- SET_FUNCOID();
+ SET_FUNCOID();
cfg = findcfg(PG_GETARG_OID(0));
prsobj = findprs(cfg->prs_id);
text *cfg = PG_GETARG_TEXT_P(0);
Datum out;
- SET_FUNCOID();
+
+ SET_FUNCOID();
out = DirectFunctionCall4(
- headline,
- ObjectIdGetDatum(name2id_cfg(cfg)),
- PG_GETARG_DATUM(1),
- PG_GETARG_DATUM(2),
+ headline,
+ ObjectIdGetDatum(name2id_cfg(cfg)),
+ PG_GETARG_DATUM(1),
+ PG_GETARG_DATUM(2),
(PG_NARGS() > 3) ? PG_GETARG_DATUM(3) : PointerGetDatum(NULL)
- );
+ );
PG_FREE_IF_COPY(cfg, 0);
PG_RETURN_DATUM(out);
Datum
headline_current(PG_FUNCTION_ARGS)
{
- SET_FUNCOID();
+ SET_FUNCOID();
PG_RETURN_DATUM(DirectFunctionCall4(
headline,
ObjectIdGetDatum(get_currcfg()),
int bestb = -1,
beste = -1;
int bestlen = -1;
- int pose = 0, posb,
+ int pose = 0,
+ posb,
poslen,
curlen;
int i;
- int highlight=0;
+ int highlight = 0;
/* config */
prs->startsel = NULL;
prs->stopsel = pstrdup(mptr->value);
else if (pg_strcasecmp(mptr->key, "HighlightAll") == 0)
highlight = (
- pg_strcasecmp(mptr->value, "1")==0 ||
- pg_strcasecmp(mptr->value, "on")==0 ||
- pg_strcasecmp(mptr->value, "true")==0 ||
- pg_strcasecmp(mptr->value, "t")==0 ||
- pg_strcasecmp(mptr->value, "y")==0 ||
- pg_strcasecmp(mptr->value, "yes")==0 ) ?
- 1 : 0;
+ pg_strcasecmp(mptr->value, "1") == 0 ||
+ pg_strcasecmp(mptr->value, "on") == 0 ||
+ pg_strcasecmp(mptr->value, "true") == 0 ||
+ pg_strcasecmp(mptr->value, "t") == 0 ||
+ pg_strcasecmp(mptr->value, "y") == 0 ||
+ pg_strcasecmp(mptr->value, "yes") == 0) ?
+ 1 : 0;
pfree(mptr->key);
pfree(mptr->value);
}
pfree(map);
- if (highlight==0) {
+ if (highlight == 0)
+ {
if (min_words >= max_words)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("MinWords should be less than MaxWords")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("MinWords should be less than MaxWords")));
if (min_words <= 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("MinWords should be positive")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("MinWords should be positive")));
if (shortword < 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("ShortWord should be >= 0")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("ShortWord should be >= 0")));
}
}
- if (highlight==0) {
+ if (highlight == 0)
+ {
while (hlCover(prs, query, &p, &q))
{
/* find cover len in words */
poslen++;
pose = i;
}
-
+
if (poslen < bestlen && !(NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword))
{
/* best already finded, so try one more cover */
p++;
continue;
}
-
- posb=p;
+
+ posb = p;
if (curlen < max_words)
- { /* find good end */
+ { /* find good end */
for (i = i - 1; i < prs->curwords && curlen < max_words; i++)
{
if (i != q)
if (curlen >= min_words)
break;
}
- if ( curlen < min_words && i>=prs->curwords ) { /* got end of text and our cover is shoter than min_words */
- for(i=p; i>= 0; i--) {
+ if (curlen < min_words && i >= prs->curwords)
+ { /* got end of text and our cover is shoter
+ * than min_words */
+ for (i = p; i >= 0; i--)
+ {
if (!NONWORDTOKEN(prs->words[i].type))
curlen++;
if (prs->words[i].item && !prs->words[i].repeated)
if (curlen >= min_words)
break;
}
- posb=(i>=0) ? i : 0;
+ posb = (i >= 0) ? i : 0;
}
}
else
- { /* shorter cover :((( */
+ { /* shorter cover :((( */
for (; curlen > min_words; i--)
{
if (!NONWORDTOKEN(prs->words[i].type))
break;
}
}
-
+
if (bestlen < 0 || (poslen > bestlen && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword)) ||
(bestlen >= 0 && !(NOENDTOKEN(prs->words[pose].type) || prs->words[pose].len <= shortword) &&
(NOENDTOKEN(prs->words[beste].type) || prs->words[beste].len <= shortword)))
beste = pose;
bestlen = poslen;
}
-
+
p++;
}
bestb = 0;
beste = pose;
}
- } else {
- bestb=0;
- beste=prs->curwords-1;
+ }
+ else
+ {
+ bestb = 0;
+ beste = prs->curwords - 1;
}
for (i = bestb; i <= beste; i++)
{
if (prs->words[i].item)
prs->words[i].selected = 1;
- if ( highlight==0 ) {
+ if (highlight == 0)
+ {
if (HLIDIGNORE(prs->words[i].type))
prs->words[i].replace = 1;
- } else {
+ }
+ else
+ {
if (HTMLHLIDIGNORE(prs->words[i].type))
prs->words[i].replace = 1;
}
static void *pgxml_repalloc(void *ptr, size_t size);
static void pgxml_pfree(void *ptr);
static char *pgxml_pstrdup(const char *string);
-static void pgxml_errorHandler (void * ctxt, const char *msg, ...);
+static void pgxml_errorHandler(void *ctxt, const char *msg,...);
-void elog_error(int level, char *explain, int force);
-void pgxml_parser_init(void);
+void elog_error(int level, char *explain, int force);
+void pgxml_parser_init(void);
static xmlChar *pgxmlNodeSetToText(xmlNodeSetPtr nodeset,
xmlChar * toptagname, xmlChar * septagname,
xmlChar * plainsep);
-text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar *toptag,
- xmlChar *septag, xmlChar *plainsep);
+text *pgxml_result_to_text(xmlXPathObjectPtr res, xmlChar * toptag,
+ xmlChar * septag, xmlChar * plainsep);
-xmlChar *pgxml_texttoxmlchar(text *textstring);
+xmlChar *pgxml_texttoxmlchar(text *textstring);
-static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar* xpath);
+static xmlXPathObjectPtr pgxml_xpath(text *document, xmlChar * xpath);
Datum xml_valid(PG_FUNCTION_ARGS);
-Datum xpath_nodeset(PG_FUNCTION_ARGS);
+Datum xpath_nodeset(PG_FUNCTION_ARGS);
Datum xpath_string(PG_FUNCTION_ARGS);
Datum xpath_number(PG_FUNCTION_ARGS);
-Datum xpath_bool(PG_FUNCTION_ARGS);
-Datum xpath_list(PG_FUNCTION_ARGS);
-Datum xpath_table(PG_FUNCTION_ARGS);
+Datum xpath_bool(PG_FUNCTION_ARGS);
+Datum xpath_list(PG_FUNCTION_ARGS);
+Datum xpath_table(PG_FUNCTION_ARGS);
/* Global variables */
-char *errbuf; /* per line error buffer */
-char *pgxml_errorMsg = NULL; /* overall error message */
+char *errbuf; /* per line error buffer */
+char *pgxml_errorMsg = NULL; /* overall error message */
/* Convenience macros */
*/
static void
-pgxml_errorHandler (void * ctxt, const char *msg, ...)
+pgxml_errorHandler(void *ctxt, const char *msg,...)
{
- va_list args;
-
- va_start(args, msg);
- vsnprintf(errbuf, ERRBUF_SIZE, msg, args);
- va_end(args);
- /* Now copy the argument across */
- if (pgxml_errorMsg == NULL)
- {
- pgxml_errorMsg = pstrdup(errbuf);
- }
-else
- {
- int32 xsize = strlen(pgxml_errorMsg);
- pgxml_errorMsg = repalloc(pgxml_errorMsg,
- (size_t) (xsize + strlen(errbuf) + 1));
- strncpy(&pgxml_errorMsg[xsize-1],errbuf,strlen(errbuf));
- pgxml_errorMsg[xsize+strlen(errbuf)-1]='\0';
-
- }
- memset(errbuf,0,ERRBUF_SIZE);
+ va_list args;
+
+ va_start(args, msg);
+ vsnprintf(errbuf, ERRBUF_SIZE, msg, args);
+ va_end(args);
+ /* Now copy the argument across */
+ if (pgxml_errorMsg == NULL)
+ pgxml_errorMsg = pstrdup(errbuf);
+ else
+ {
+ int32 xsize = strlen(pgxml_errorMsg);
+
+ pgxml_errorMsg = repalloc(pgxml_errorMsg,
+ (size_t) (xsize + strlen(errbuf) + 1));
+ strncpy(&pgxml_errorMsg[xsize - 1], errbuf, strlen(errbuf));
+ pgxml_errorMsg[xsize + strlen(errbuf) - 1] = '\0';
+
+ }
+ memset(errbuf, 0, ERRBUF_SIZE);
}
/* This function reports the current message at the level specified */
-void elog_error(int level, char *explain, int force)
+void
+elog_error(int level, char *explain, int force)
{
- if (force || (pgxml_errorMsg != NULL))
- {
- if (pgxml_errorMsg == NULL)
+ if (force || (pgxml_errorMsg != NULL))
{
- ereport(level,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
- errmsg(explain)));
- }
- else
- {
- ereport(level,(errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
- errmsg("%s:%s",explain,pgxml_errorMsg)));
- pfree(pgxml_errorMsg);
+ if (pgxml_errorMsg == NULL)
+ {
+ ereport(level, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
+ errmsg(explain)));
+ }
+ else
+ {
+ ereport(level, (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION),
+ errmsg("%s:%s", explain, pgxml_errorMsg)));
+ pfree(pgxml_errorMsg);
+ }
}
- }
}
void
pgxml_errorMsg = NULL;
errbuf = palloc(200);
- memset(errbuf,0,200);
+ memset(errbuf, 0, 200);
}
pgxmlNodeSetToText(xmlNodeSetPtr nodeset,
xmlChar * toptagname,
xmlChar * septagname,
- xmlChar * plainsep)
+ xmlChar * plainsep)
{
/* Function translates a nodeset into a text representation */
*/
/* each representation is surrounded by <tagname> ... </tagname> */
- /* plainsep is an ordinary (not tag) seperator - if used, then
- * nodes are cast to string as output method */
-
+
+ /*
+ * plainsep is an ordinary (not tag) seperator - if used, then nodes
+ * are cast to string as output method
+ */
+
xmlBufferPtr buf;
xmlChar *result;
for (i = 0; i < nodeset->nodeNr; i++)
{
- if (plainsep != NULL) {
- xmlBufferWriteCHAR(buf,
- xmlXPathCastNodeToString(nodeset->nodeTab[i]));
-
- /* If this isn't the last entry, write the plain sep. */
- if (i < (nodeset->nodeNr)-1) {
- xmlBufferWriteChar(buf, plainsep);
- }
- } else {
-
-
- if ((septagname != NULL) && (xmlStrlen(septagname) > 0))
+ if (plainsep != NULL)
{
- xmlBufferWriteChar(buf, "<");
- xmlBufferWriteCHAR(buf, septagname);
- xmlBufferWriteChar(buf, ">");
- }
- xmlNodeDump(buf,
- nodeset->nodeTab[i]->doc,
- nodeset->nodeTab[i],
- 1, 0);
+ xmlBufferWriteCHAR(buf,
+ xmlXPathCastNodeToString(nodeset->nodeTab[i]));
- if ((septagname != NULL) && (xmlStrlen(septagname) > 0))
+ /* If this isn't the last entry, write the plain sep. */
+ if (i < (nodeset->nodeNr) - 1)
+ xmlBufferWriteChar(buf, plainsep);
+ }
+ else
{
- xmlBufferWriteChar(buf, "");
- xmlBufferWriteCHAR(buf, septagname);
- xmlBufferWriteChar(buf, ">");
+
+
+ if ((septagname != NULL) && (xmlStrlen(septagname) > 0))
+ {
+ xmlBufferWriteChar(buf, "<");
+ xmlBufferWriteCHAR(buf, septagname);
+ xmlBufferWriteChar(buf, ">");
+ }
+ xmlNodeDump(buf,
+ nodeset->nodeTab[i]->doc,
+ nodeset->nodeTab[i],
+ 1, 0);
+
+ if ((septagname != NULL) && (xmlStrlen(septagname) > 0))
+ {
+ xmlBufferWriteChar(buf, "");
+ xmlBufferWriteCHAR(buf, septagname);
+ xmlBufferWriteChar(buf, ">");
+ }
}
- }
}
}
Datum
xpath_nodeset(PG_FUNCTION_ARGS)
{
- xmlChar *xpath, *toptag, *septag;
- int32 pathsize;
- text
- *xpathsupp,
- *xpres;
+ xmlChar *xpath,
+ *toptag,
+ *septag;
+ int32 pathsize;
+ text
+ *xpathsupp,
+ *xpres;
/* PG_GETARG_TEXT_P(0) is document buffer */
xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xpath = pgxml_texttoxmlchar(xpathsupp);
xpres = pgxml_result_to_text(
- pgxml_xpath(PG_GETARG_TEXT_P(0),xpath),
- toptag,septag,NULL);
+ pgxml_xpath(PG_GETARG_TEXT_P(0), xpath),
+ toptag, septag, NULL);
/* xmlCleanupParser(); done by result_to_text routine */
pfree((void *) xpath);
- if (xpres == NULL)
- {
- PG_RETURN_NULL();
- }
+ if (xpres == NULL)
+ PG_RETURN_NULL();
PG_RETURN_TEXT_P(xpres);
}
-// The following function is almost identical, but returns the elements in
-// a list.
+/* The following function is almost identical, but returns the elements in */
+/* a list. */
PG_FUNCTION_INFO_V1(xpath_list);
Datum
xpath_list(PG_FUNCTION_ARGS)
{
- xmlChar *xpath, *plainsep;
- int32 pathsize;
- text
- *xpathsupp,
- *xpres;
+ xmlChar *xpath,
+ *plainsep;
+ int32 pathsize;
+ text
+ *xpathsupp,
+ *xpres;
/* PG_GETARG_TEXT_P(0) is document buffer */
xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
xpath = pgxml_texttoxmlchar(xpathsupp);
xpres = pgxml_result_to_text(
- pgxml_xpath(PG_GETARG_TEXT_P(0),xpath),
- NULL,NULL,plainsep);
+ pgxml_xpath(PG_GETARG_TEXT_P(0), xpath),
+ NULL, NULL, plainsep);
/* xmlCleanupParser(); done by result_to_text routine */
pfree((void *) xpath);
- if (xpres == NULL)
- {
- PG_RETURN_NULL();
- }
+ if (xpres == NULL)
+ PG_RETURN_NULL();
PG_RETURN_TEXT_P(xpres);
}
Datum
xpath_string(PG_FUNCTION_ARGS)
{
- xmlChar *xpath;
- int32 pathsize;
- text
- *xpathsupp,
- *xpres;
+ xmlChar *xpath;
+ int32 pathsize;
+ text
+ *xpathsupp,
+ *xpres;
/* PG_GETARG_TEXT_P(0) is document buffer */
xpathsupp = PG_GETARG_TEXT_P(1); /* XPath expression */
pathsize = VARSIZE(xpathsupp) - VARHDRSZ;
- /* We encapsulate the supplied path with "string()"
- * = 8 chars + 1 for NUL at end */
+ /*
+ * We encapsulate the supplied path with "string()" = 8 chars + 1 for
+ * NUL at end
+ */
/* We could try casting to string using the libxml function? */
- xpath =(xmlChar *) palloc(pathsize + 9);
- memcpy((char *) (xpath+7), VARDATA(xpathsupp), pathsize);
- strncpy((char *) xpath, "string(",7);
- xpath[pathsize+7] = ')';
- xpath[pathsize+8] = '\0';
+ xpath = (xmlChar *) palloc(pathsize + 9);
+ memcpy((char *) (xpath + 7), VARDATA(xpathsupp), pathsize);
+ strncpy((char *) xpath, "string(", 7);
+ xpath[pathsize + 7] = ')';
+ xpath[pathsize + 8] = '\0';
xpres = pgxml_result_to_text(
- pgxml_xpath(PG_GETARG_TEXT_P(0),xpath),
- NULL,NULL,NULL);
+ pgxml_xpath(PG_GETARG_TEXT_P(0), xpath),
+ NULL, NULL, NULL);
xmlCleanupParser();
pfree((void *) xpath);
- if (xpres == NULL)
- {
- PG_RETURN_NULL();
- }
+ if (xpres == NULL)
+ PG_RETURN_NULL();
PG_RETURN_TEXT_P(xpres);
}
Datum
xpath_number(PG_FUNCTION_ARGS)
{
- xmlChar *xpath;
- int32 pathsize;
- text
- *xpathsupp;
-
- float4 fRes;
+ xmlChar *xpath;
+ int32 pathsize;
+ text
+ *xpathsupp;
+
+ float4 fRes;
xmlXPathObjectPtr res;
xpath = pgxml_texttoxmlchar(xpathsupp);
- res = pgxml_xpath(PG_GETARG_TEXT_P(0),xpath);
+ res = pgxml_xpath(PG_GETARG_TEXT_P(0), xpath);
pfree((void *) xpath);
if (res == NULL)
- {
- xmlCleanupParser();
- PG_RETURN_NULL();
- }
+ {
+ xmlCleanupParser();
+ PG_RETURN_NULL();
+ }
fRes = xmlXPathCastToNumber(res);
xmlCleanupParser();
if (xmlXPathIsNaN(fRes))
- {
- PG_RETURN_NULL();
- }
+ PG_RETURN_NULL();
PG_RETURN_FLOAT4(fRes);
Datum
xpath_bool(PG_FUNCTION_ARGS)
{
- xmlChar *xpath;
- int32 pathsize;
- text
- *xpathsupp;
-
- int bRes;
+ xmlChar *xpath;
+ int32 pathsize;
+ text
+ *xpathsupp;
+
+ int bRes;
xmlXPathObjectPtr res;
xpath = pgxml_texttoxmlchar(xpathsupp);
- res = pgxml_xpath(PG_GETARG_TEXT_P(0),xpath);
+ res = pgxml_xpath(PG_GETARG_TEXT_P(0), xpath);
pfree((void *) xpath);
if (res == NULL)
- {
- xmlCleanupParser();
- PG_RETURN_BOOL(false);
- }
+ {
+ xmlCleanupParser();
+ PG_RETURN_BOOL(false);
+ }
bRes = xmlXPathCastToBoolean(res);
xmlCleanupParser();
/* Core function to evaluate XPath query */
xmlXPathObjectPtr
- pgxml_xpath(text *document, xmlChar *xpath)
- {
+pgxml_xpath(text *document, xmlChar * xpath)
+{
xmlDocPtr doctree;
xmlXPathContextPtr ctxt;
int32 docsize;
-
+
docsize = VARSIZE(document) - VARHDRSZ;
pgxml_parser_init();
doctree = xmlParseMemory((char *) VARDATA(document), docsize);
if (doctree == NULL)
- { /* not well-formed */
+ { /* not well-formed */
return NULL;
}
{
xmlCleanupParser();
xmlFreeDoc(doctree);
- elog_error(ERROR,"XPath Syntax Error",1);
+ elog_error(ERROR, "XPath Syntax Error", 1);
- return NULL;
+ return NULL;
}
/* Now evaluate the path expression. */
if (res == NULL)
{
- xmlXPathFreeContext(ctxt);
- // xmlCleanupParser();
+ xmlXPathFreeContext(ctxt);
+ /* xmlCleanupParser(); */
xmlFreeDoc(doctree);
return NULL;
}
/* xmlFreeDoc(doctree); */
return res;
- }
+}
-text
-*pgxml_result_to_text(xmlXPathObjectPtr res,
- xmlChar *toptag,
- xmlChar *septag,
- xmlChar *plainsep)
+text
+ *
+pgxml_result_to_text(xmlXPathObjectPtr res,
+ xmlChar * toptag,
+ xmlChar * septag,
+ xmlChar * plainsep)
{
- xmlChar *xpresstr;
- int32 ressize;
- text *xpres;
-
- if (res == NULL)
- {
- return NULL;
- }
+ xmlChar *xpresstr;
+ int32 ressize;
+ text *xpres;
+
+ if (res == NULL)
+ return NULL;
switch (res->type)
{
case XPATH_NODESET:
xpresstr = pgxmlNodeSetToText(res->nodesetval,
- toptag,
- septag, plainsep);
+ toptag,
+ septag, plainsep);
break;
case XPATH_STRING:
xmlFree(xpresstr);
- elog_error(ERROR,"XPath error",0);
+ elog_error(ERROR, "XPath error", 0);
return xpres;
PG_FUNCTION_INFO_V1(xpath_table);
-Datum xpath_table(PG_FUNCTION_ARGS)
+Datum
+xpath_table(PG_FUNCTION_ARGS)
{
/* SPI (input tuple) support */
- SPITupleTable *tuptable;
- HeapTuple spi_tuple;
- TupleDesc spi_tupdesc;
+ SPITupleTable *tuptable;
+ HeapTuple spi_tuple;
+ TupleDesc spi_tupdesc;
/* Output tuple (tuplestore) support */
- Tuplestorestate *tupstore = NULL;
- TupleDesc ret_tupdesc;
- HeapTuple ret_tuple;
-
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- AttInMetadata *attinmeta;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
-
-/* Function parameters */
- char *pkeyfield = GET_STR(PG_GETARG_TEXT_P(0));
- char *xmlfield = GET_STR(PG_GETARG_TEXT_P(1));
- char *relname = GET_STR(PG_GETARG_TEXT_P(2));
- char *xpathset = GET_STR(PG_GETARG_TEXT_P(3));
- char *condition = GET_STR(PG_GETARG_TEXT_P(4));
-
- char **values;
- xmlChar **xpaths;
- xmlChar *pos;
- xmlChar *pathsep= "|";
-
- int numpaths;
- int ret;
- int proc;
- int i;
- int j;
- int rownr; /* For issuing multiple rows from one original document */
- int had_values; /* To determine end of nodeset results */
-
- StringInfo querysql;
+ Tuplestorestate *tupstore = NULL;
+ TupleDesc ret_tupdesc;
+ HeapTuple ret_tuple;
+
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ AttInMetadata *attinmeta;
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+
+/* Function parameters */
+ char *pkeyfield = GET_STR(PG_GETARG_TEXT_P(0));
+ char *xmlfield = GET_STR(PG_GETARG_TEXT_P(1));
+ char *relname = GET_STR(PG_GETARG_TEXT_P(2));
+ char *xpathset = GET_STR(PG_GETARG_TEXT_P(3));
+ char *condition = GET_STR(PG_GETARG_TEXT_P(4));
+
+ char **values;
+ xmlChar **xpaths;
+ xmlChar *pos;
+ xmlChar *pathsep = "|";
+
+ int numpaths;
+ int ret;
+ int proc;
+ int i;
+ int j;
+ int rownr; /* For issuing multiple rows from one
+ * original document */
+ int had_values; /* To determine end of nodeset results */
+
+ StringInfo querysql;
/* We only have a valid tuple description in table function mode */
- if (rsinfo->expectedDesc == NULL) {
- ereport(ERROR,(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("xpath_table must be called as a table function")));
- }
-
-/* The tuplestore must exist in a higher context than
+ if (rsinfo->expectedDesc == NULL)
+ {
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("xpath_table must be called as a table function")));
+ }
+
+/* The tuplestore must exist in a higher context than
* this function call (per_query_ctx is used) */
- per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
- oldcontext = MemoryContextSwitchTo(per_query_ctx);
+ per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
/* Create the tuplestore - work_mem is the max in-memory size before a
* file is created on disk to hold it.
*/
- tupstore = tuplestore_begin_heap(true, false, work_mem);
+ tupstore = tuplestore_begin_heap(true, false, work_mem);
- MemoryContextSwitchTo(oldcontext);
+ MemoryContextSwitchTo(oldcontext);
- /* get the requested return tuple description */
- ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);
+ /* get the requested return tuple description */
+ ret_tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc);
- /* At the moment we assume that the returned attributes make sense
- * for the XPath specififed (i.e. we trust the caller).
- * It's not fatal if they get it wrong - the input function for the
- * column type will raise an error if the path result can't be converted
- * into the correct binary representation.
- */
+ /*
+ * At the moment we assume that the returned attributes make sense for
+ * the XPath specififed (i.e. we trust the caller). It's not fatal if
+ * they get it wrong - the input function for the column type will
+ * raise an error if the path result can't be converted into the
+ * correct binary representation.
+ */
- attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);
+ attinmeta = TupleDescGetAttInMetadata(ret_tupdesc);
- /* We want to materialise because it means that we don't have to
- * carry libxml2 parser state between invocations of this function
- */
+ /*
+ * We want to materialise because it means that we don't have to carry
+ * libxml2 parser state between invocations of this function
+ */
- /* check to see if caller supports us returning a tuplestore */
- if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize))
- ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("xpath_table requires Materialize mode, but it is not "
- "allowed in this context")));
+ /* check to see if caller supports us returning a tuplestore */
+ if (!rsinfo || !(rsinfo->allowedModes & SFRM_Materialize))
+ ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR),
+ errmsg("xpath_table requires Materialize mode, but it is not "
+ "allowed in this context")));
- // Set return mode and allocate value space.
- rsinfo->returnMode = SFRM_Materialize;
- rsinfo->setDesc = ret_tupdesc;
-
- values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
+ /* Set return mode and allocate value space. */
+ rsinfo->returnMode = SFRM_Materialize;
+ rsinfo->setDesc = ret_tupdesc;
- xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));
+ values = (char **) palloc(ret_tupdesc->natts * sizeof(char *));
- /* Split XPaths. xpathset is a writable CString. */
+ xpaths = (xmlChar **) palloc(ret_tupdesc->natts * sizeof(xmlChar *));
- /* Note that we stop splitting once we've done all needed for tupdesc */
+ /* Split XPaths. xpathset is a writable CString. */
- numpaths=0;
- pos = xpathset;
- do {
- xpaths[numpaths] = pos;
- pos = strstr(pos,pathsep);
- if (pos != NULL) {
- *pos = '\0';
- pos++;
- }
- numpaths++;
- } while ((pos != NULL) && (numpaths < (ret_tupdesc->natts - 1) ));
+ /* Note that we stop splitting once we've done all needed for tupdesc */
- /* Now build query */
+ numpaths = 0;
+ pos = xpathset;
+ do
+ {
+ xpaths[numpaths] = pos;
+ pos = strstr(pos, pathsep);
+ if (pos != NULL)
+ {
+ *pos = '\0';
+ pos++;
+ }
+ numpaths++;
+ } while ((pos != NULL) && (numpaths < (ret_tupdesc->natts - 1)));
+
+ /* Now build query */
- querysql = makeStringInfo();
+ querysql = makeStringInfo();
- /* Build initial sql statement */
- appendStringInfo(querysql, "SELECT %s, %s FROM %s WHERE %s",
- pkeyfield,
- xmlfield,
- relname,
- condition
- );
+ /* Build initial sql statement */
+ appendStringInfo(querysql, "SELECT %s, %s FROM %s WHERE %s",
+ pkeyfield,
+ xmlfield,
+ relname,
+ condition
+ );
- if ((ret = SPI_connect()) < 0) {
- elog(ERROR, "xpath_table: SPI_connect returned %d", ret);
- }
+ if ((ret = SPI_connect()) < 0)
+ elog(ERROR, "xpath_table: SPI_connect returned %d", ret);
- if ((ret = SPI_exec(querysql->data,0)) != SPI_OK_SELECT) {
- elog(ERROR,"xpath_table: SPI execution failed for query %s",querysql->data);
- }
+ if ((ret = SPI_exec(querysql->data, 0)) != SPI_OK_SELECT)
+ elog(ERROR, "xpath_table: SPI execution failed for query %s", querysql->data);
- proc= SPI_processed;
- /* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
- tuptable = SPI_tuptable;
- spi_tupdesc = tuptable->tupdesc;
+ proc = SPI_processed;
+ /* elog(DEBUG1,"xpath_table: SPI returned %d rows",proc); */
+ tuptable = SPI_tuptable;
+ spi_tupdesc = tuptable->tupdesc;
/* Switch out of SPI context */
- MemoryContextSwitchTo(oldcontext);
+ MemoryContextSwitchTo(oldcontext);
/* Check that SPI returned correct result. If you put a comma into one of
* the function parameters, this will catch it when the SPI query returns
- * e.g. 3 columns.
+ * e.g. 3 columns.
*/
- if (spi_tupdesc->natts != 2) {
- ereport(ERROR,(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("Expression returning multiple columns is not valid in parameter list"),
- errdetail("Expected two columns in SPI result, got %d",spi_tupdesc->natts)));
- }
+ if (spi_tupdesc->natts != 2)
+ {
+ ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("Expression returning multiple columns is not valid in parameter list"),
+ errdetail("Expected two columns in SPI result, got %d", spi_tupdesc->natts)));
+ }
/* Setup the parser. Beware that this must happen in the same context as the
* cleanup - which means that any error from here on must do cleanup to
* ensure that the entity table doesn't get freed by being out of context.
*/
- pgxml_parser_init();
-
- /* For each row i.e. document returned from SPI */
- for (i=0; i < proc; i++) {
- char *pkey;
- char *xmldoc;
-
- xmlDocPtr doctree;
- xmlXPathContextPtr ctxt;
- xmlXPathObjectPtr res;
- xmlChar *resstr;
-
-
- xmlXPathCompExprPtr comppath;
-
- /* Extract the row data as C Strings */
-
- spi_tuple = tuptable->vals[i];
- pkey = SPI_getvalue(spi_tuple, spi_tupdesc,1);
- xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc,2);
-
-
- /* Clear the values array, so that not-well-formed documents
- * return NULL in all columns.
- */
-
- /* Note that this also means that spare columns will be NULL. */
- for (j=0; j < ret_tupdesc->natts; j++) {
- values[j]= NULL;
- }
-
- /* Insert primary key */
- values[0]=pkey;
-
- /* Parse the document */
- doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
-
- if (doctree == NULL)
- { /* not well-formed, so output all-NULL tuple */
-
- ret_tuple = BuildTupleFromCStrings(attinmeta, values);
- oldcontext = MemoryContextSwitchTo(per_query_ctx);
- tuplestore_puttuple(tupstore, ret_tuple);
- MemoryContextSwitchTo(oldcontext);
- heap_freetuple(ret_tuple);
- }
- else
- {
- /* New loop here - we have to deal with nodeset results */
- rownr=0;
-
- do {
- /* Now evaluate the set of xpaths. */
- had_values=0;
- for (j=0; j < numpaths; j++) {
-
- ctxt = xmlXPathNewContext(doctree);
- ctxt->node = xmlDocGetRootElement(doctree);
- xmlSetGenericErrorFunc(ctxt, pgxml_errorHandler);
-
- /* compile the path */
- comppath = xmlXPathCompile(xpaths[j]);
- if (comppath == NULL)
- {
- xmlCleanupParser();
- xmlFreeDoc(doctree);
-
- elog_error(ERROR,"XPath Syntax Error",1);
-
- PG_RETURN_NULL(); /* Keep compiler happy */
- }
-
- /* Now evaluate the path expression. */
- res = xmlXPathCompiledEval(comppath, ctxt);
- xmlXPathFreeCompExpr(comppath);
-
- if (res != NULL)
- {
- switch (res->type)
- {
- case XPATH_NODESET:
- /* We see if this nodeset has enough nodes */
- if ((res->nodesetval != NULL) && (rownr < res->nodesetval->nodeNr)) {
- resstr =
- xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
- had_values=1;
- } else {
- resstr = NULL;
- }
-
- break;
-
- case XPATH_STRING:
- resstr = xmlStrdup(res->stringval);
- break;
-
- default:
- elog(NOTICE, "Unsupported XQuery result: %d", res->type);
- resstr = xmlStrdup("");
- }
-
-
- // Insert this into the appropriate column in the result tuple.
- values[j+1] = resstr;
- }
- xmlXPathFreeContext(ctxt);
- }
- // Now add the tuple to the output, if there is one.
- if (had_values) {
- ret_tuple = BuildTupleFromCStrings(attinmeta, values);
- oldcontext = MemoryContextSwitchTo(per_query_ctx);
- tuplestore_puttuple(tupstore, ret_tuple);
- MemoryContextSwitchTo(oldcontext);
- heap_freetuple(ret_tuple);
- }
-
- rownr++;
-
- } while (had_values);
-
- }
-
- xmlFreeDoc(doctree);
-
- pfree(pkey);
- pfree(xmldoc);
- }
-
- xmlCleanupParser();
+ pgxml_parser_init();
+
+ /* For each row i.e. document returned from SPI */
+ for (i = 0; i < proc; i++)
+ {
+ char *pkey;
+ char *xmldoc;
+
+ xmlDocPtr doctree;
+ xmlXPathContextPtr ctxt;
+ xmlXPathObjectPtr res;
+ xmlChar *resstr;
+
+
+ xmlXPathCompExprPtr comppath;
+
+ /* Extract the row data as C Strings */
+
+ spi_tuple = tuptable->vals[i];
+ pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
+ xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
+
+
+ /*
+ * Clear the values array, so that not-well-formed documents
+ * return NULL in all columns.
+ */
+
+ /* Note that this also means that spare columns will be NULL. */
+ for (j = 0; j < ret_tupdesc->natts; j++)
+ values[j] = NULL;
+
+ /* Insert primary key */
+ values[0] = pkey;
+
+ /* Parse the document */
+ doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
+
+ if (doctree == NULL)
+ { /* not well-formed, so output all-NULL
+ * tuple */
+
+ ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+ tuplestore_puttuple(tupstore, ret_tuple);
+ MemoryContextSwitchTo(oldcontext);
+ heap_freetuple(ret_tuple);
+ }
+ else
+ {
+ /* New loop here - we have to deal with nodeset results */
+ rownr = 0;
+
+ do
+ {
+ /* Now evaluate the set of xpaths. */
+ had_values = 0;
+ for (j = 0; j < numpaths; j++)
+ {
+
+ ctxt = xmlXPathNewContext(doctree);
+ ctxt->node = xmlDocGetRootElement(doctree);
+ xmlSetGenericErrorFunc(ctxt, pgxml_errorHandler);
+
+ /* compile the path */
+ comppath = xmlXPathCompile(xpaths[j]);
+ if (comppath == NULL)
+ {
+ xmlCleanupParser();
+ xmlFreeDoc(doctree);
+
+ elog_error(ERROR, "XPath Syntax Error", 1);
+
+ PG_RETURN_NULL(); /* Keep compiler happy */
+ }
+
+ /* Now evaluate the path expression. */
+ res = xmlXPathCompiledEval(comppath, ctxt);
+ xmlXPathFreeCompExpr(comppath);
+
+ if (res != NULL)
+ {
+ switch (res->type)
+ {
+ case XPATH_NODESET:
+ /* We see if this nodeset has enough nodes */
+ if ((res->nodesetval != NULL) && (rownr < res->nodesetval->nodeNr))
+ {
+ resstr =
+ xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
+ had_values = 1;
+ }
+ else
+ resstr = NULL;
+
+ break;
+
+ case XPATH_STRING:
+ resstr = xmlStrdup(res->stringval);
+ break;
+
+ default:
+ elog(NOTICE, "Unsupported XQuery result: %d", res->type);
+ resstr = xmlStrdup("");
+ }
+
+
+ /*
+ * Insert this into the appropriate column in the
+ * result tuple.
+ */
+ values[j + 1] = resstr;
+ }
+ xmlXPathFreeContext(ctxt);
+ }
+ /* Now add the tuple to the output, if there is one. */
+ if (had_values)
+ {
+ ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+ oldcontext = MemoryContextSwitchTo(per_query_ctx);
+ tuplestore_puttuple(tupstore, ret_tuple);
+ MemoryContextSwitchTo(oldcontext);
+ heap_freetuple(ret_tuple);
+ }
+
+ rownr++;
+
+ } while (had_values);
+
+ }
+
+ xmlFreeDoc(doctree);
+
+ pfree(pkey);
+ pfree(xmldoc);
+ }
+
+ xmlCleanupParser();
/* Needed to flag completeness in 7.3.1. 7.4 defines it as a no-op. */
- tuplestore_donestoring(tupstore);
-
- SPI_finish();
-
- rsinfo->setResult=tupstore;
-
- /*
- * SFRM_Materialize mode expects us to return a NULL Datum. The actual
- * tuples are in our tuplestore and passed back through
- * rsinfo->setResult. rsinfo->setDesc is set to the tuple description
- * that we actually used to build our tuples with, so the caller can
- * verify we did what it was expecting.
- */
- return (Datum) 0;
-
+ tuplestore_donestoring(tupstore);
+
+ SPI_finish();
+
+ rsinfo->setResult = tupstore;
+
+ /*
+ * SFRM_Materialize mode expects us to return a NULL Datum. The actual
+ * tuples are in our tuplestore and passed back through
+ * rsinfo->setResult. rsinfo->setDesc is set to the tuple description
+ * that we actually used to build our tuples with, so the caller can
+ * verify we did what it was expecting.
+ */
+ return (Datum) 0;
+
}
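
The nodeset handling in xpath_table() above (keep emitting result rows while at least one compiled XPath still has a node at index rownr) is easier to see in isolation. Below is a minimal, self-contained sketch using plain libxml2; the sample document, the two paths, and the printf() output are illustrative assumptions standing in for the SPI and tuplestore machinery, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/xmlmemory.h>
#include <libxml/xpath.h>

int
main(void)
{
	const char *xml = "<doc><a>1</a><a>2</a><b>x</b></doc>";
	const char *paths[] = {"/doc/a/text()", "/doc/b/text()"};
	xmlDocPtr	doctree = xmlParseMemory(xml, strlen(xml));
	int			rownr = 0;
	int			had_values;
	int			j;

	if (doctree == NULL)
		return 1;

	do
	{
		had_values = 0;
		for (j = 0; j < 2; j++)
		{
			xmlXPathContextPtr ctxt = xmlXPathNewContext(doctree);
			xmlXPathObjectPtr res;

			ctxt->node = xmlDocGetRootElement(doctree);
			res = xmlXPathEval((const xmlChar *) paths[j], ctxt);
			if (res && res->type == XPATH_NODESET &&
				res->nodesetval && rownr < res->nodesetval->nodeNr)
			{
				/* take the rownr'th node of this nodeset, as above */
				xmlChar    *s = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);

				printf("row %d, col %d: %s\n", rownr, j + 1, s);
				xmlFree(s);
				had_values = 1;
			}
			if (res)
				xmlXPathFreeObject(res);
			xmlXPathFreeContext(ctxt);
		}
		rownr++;
	} while (had_values);		/* stop when no path had a node left */

	xmlFreeDoc(doctree);
	xmlCleanupParser();
	return 0;
}
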
/* local defs */
static void parse_params(const char **params, text *paramstr);
-Datum xslt_process(PG_FUNCTION_ARGS);
+Datum xslt_process(PG_FUNCTION_ARGS);
#define MAXPARAMS 20
PG_FUNCTION_INFO_V1(xslt_process);
-Datum xslt_process(PG_FUNCTION_ARGS) {
-
-
- const char *params[MAXPARAMS + 1]; /* +1 for the terminator */
- xsltStylesheetPtr stylesheet = NULL;
- xmlDocPtr doctree;
- xmlDocPtr restree;
- xmlDocPtr ssdoc = NULL;
- xmlChar *resstr;
- int resstat;
- int reslen;
-
- text *doct = PG_GETARG_TEXT_P(0);
- text *ssheet = PG_GETARG_TEXT_P(1);
- text *paramstr;
- text *tres;
-
-
- if (fcinfo->nargs == 3)
- {
- paramstr = PG_GETARG_TEXT_P(2);
- parse_params(params,paramstr);
- }
- else /* No parameters */
- {
- params[0] = NULL;
- }
-
- /* Setup parser */
- pgxml_parser_init();
-
- /* Check to see if document is a file or a literal */
-
- if (VARDATA(doct)[0] == '<')
- {
- doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct)-VARHDRSZ);
- }
- else
- {
- doctree = xmlParseFile(GET_STR(doct));
- }
-
- if (doctree == NULL)
- {
- xmlCleanupParser();
- elog_error(ERROR,"Error parsing XML document",0);
-
- PG_RETURN_NULL();
- }
-
- /* Same for stylesheet */
- if (VARDATA(ssheet)[0] == '<')
- {
- ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
- VARSIZE(ssheet)-VARHDRSZ);
- if (ssdoc == NULL)
+Datum
+xslt_process(PG_FUNCTION_ARGS)
+{
+
+
+ const char *params[MAXPARAMS + 1]; /* +1 for the terminator */
+ xsltStylesheetPtr stylesheet = NULL;
+ xmlDocPtr doctree;
+ xmlDocPtr restree;
+ xmlDocPtr ssdoc = NULL;
+ xmlChar *resstr;
+ int resstat;
+ int reslen;
+
+ text *doct = PG_GETARG_TEXT_P(0);
+ text *ssheet = PG_GETARG_TEXT_P(1);
+ text *paramstr;
+ text *tres;
+
+
+ if (fcinfo->nargs == 3)
+ {
+ paramstr = PG_GETARG_TEXT_P(2);
+ parse_params(params, paramstr);
+ }
+	else
+		/* No parameters */
+		params[0] = NULL;
+
+ /* Setup parser */
+ pgxml_parser_init();
+
+ /* Check to see if document is a file or a literal */
+
+ if (VARDATA(doct)[0] == '<')
+ doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
+ else
+ doctree = xmlParseFile(GET_STR(doct));
+
+ if (doctree == NULL)
+ {
+ xmlCleanupParser();
+ elog_error(ERROR, "Error parsing XML document", 0);
+
+ PG_RETURN_NULL();
+ }
+
+ /* Same for stylesheet */
+ if (VARDATA(ssheet)[0] == '<')
{
- xmlFreeDoc(doctree);
- xmlCleanupParser();
- elog_error(ERROR,"Error parsing stylesheet as XML document",0);
- PG_RETURN_NULL();
+ ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
+ VARSIZE(ssheet) - VARHDRSZ);
+ if (ssdoc == NULL)
+ {
+ xmlFreeDoc(doctree);
+ xmlCleanupParser();
+ elog_error(ERROR, "Error parsing stylesheet as XML document", 0);
+ PG_RETURN_NULL();
+ }
+
+ stylesheet = xsltParseStylesheetDoc(ssdoc);
}
+ else
+ stylesheet = xsltParseStylesheetFile(GET_STR(ssheet));
- stylesheet = xsltParseStylesheetDoc(ssdoc);
- }
- else
- {
- stylesheet = xsltParseStylesheetFile(GET_STR(ssheet));
- }
-
-
- if (stylesheet == NULL)
- {
- xmlFreeDoc(doctree);
- xsltCleanupGlobals();
- xmlCleanupParser();
- elog_error(ERROR,"Failed to parse stylesheet",0);
- PG_RETURN_NULL();
- }
-
- restree = xsltApplyStylesheet(stylesheet, doctree, params);
- resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
-
- xsltFreeStylesheet(stylesheet);
- xmlFreeDoc(restree);
- xmlFreeDoc(doctree);
-
- xsltCleanupGlobals();
- xmlCleanupParser();
-
- if (resstat < 0) {
- PG_RETURN_NULL();
- }
-
- tres = palloc(reslen + VARHDRSZ);
- memcpy(VARDATA(tres),resstr,reslen);
- VARATT_SIZEP(tres) = reslen + VARHDRSZ;
-
- PG_RETURN_TEXT_P(tres);
+
+ if (stylesheet == NULL)
+ {
+ xmlFreeDoc(doctree);
+ xsltCleanupGlobals();
+ xmlCleanupParser();
+ elog_error(ERROR, "Failed to parse stylesheet", 0);
+ PG_RETURN_NULL();
+ }
+
+ restree = xsltApplyStylesheet(stylesheet, doctree, params);
+ resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
+
+ xsltFreeStylesheet(stylesheet);
+ xmlFreeDoc(restree);
+ xmlFreeDoc(doctree);
+
+ xsltCleanupGlobals();
+ xmlCleanupParser();
+
+ if (resstat < 0)
+ PG_RETURN_NULL();
+
+ tres = palloc(reslen + VARHDRSZ);
+ memcpy(VARDATA(tres), resstr, reslen);
+ VARATT_SIZEP(tres) = reslen + VARHDRSZ;
+
+ PG_RETURN_TEXT_P(tres);
}
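
For reference, the core libxslt call sequence used by xslt_process() can be exercised standalone. This is only a sketch under assumptions: the document, stylesheet, and parameter value are invented, and the backend pieces (text varlena handling, GET_STR(), elog_error()) are omitted. libxslt evaluates parameter values as XPath expressions, which is why the literal string value below carries embedded single quotes.

#include <stdio.h>
#include <string.h>
#include <libxml/parser.h>
#include <libxml/xmlmemory.h>
#include <libxslt/xslt.h>
#include <libxslt/xsltInternals.h>
#include <libxslt/transform.h>
#include <libxslt/xsltutils.h>

int
main(void)
{
	const char *docstr = "<root><v>42</v></root>";
	const char *ssstr =
	"<xsl:stylesheet version=\"1.0\""
	" xmlns:xsl=\"http://www.w3.org/1999/XSL/Transform\">"
	"<xsl:param name=\"n1\"/>"
	"<xsl:template match=\"/\">"
	"<out p=\"{$n1}\"><xsl:value-of select=\"/root/v\"/></out>"
	"</xsl:template></xsl:stylesheet>";
	/* name/value pairs, NULL-terminated, as built by parse_params() */
	const char *params[] = {"n1", "'hello'", NULL};
	xmlDocPtr	doctree = xmlParseMemory(docstr, strlen(docstr));
	xmlDocPtr	ssdoc = xmlParseMemory(ssstr, strlen(ssstr));
	xsltStylesheetPtr stylesheet;
	xmlDocPtr	restree;
	xmlChar    *resstr;
	int			reslen;

	if (doctree == NULL || ssdoc == NULL)
		return 1;

	stylesheet = xsltParseStylesheetDoc(ssdoc);
	if (stylesheet == NULL)
		return 1;

	restree = xsltApplyStylesheet(stylesheet, doctree, params);
	if (restree != NULL &&
		xsltSaveResultToString(&resstr, &reslen, restree, stylesheet) >= 0)
	{
		printf("%.*s", reslen, resstr);
		xmlFree(resstr);
	}

	xsltFreeStylesheet(stylesheet);		/* also frees ssdoc */
	if (restree != NULL)
		xmlFreeDoc(restree);
	xmlFreeDoc(doctree);
	xsltCleanupGlobals();
	xmlCleanupParser();
	return 0;
}
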
-void parse_params(const char **params, text *paramstr)
+void
+parse_params(const char **params, text *paramstr)
{
- char *pos;
- char *pstr;
-
- int i;
- char *nvsep="=";
- char *itsep=",";
-
- pstr = GET_STR(paramstr);
-
- pos=pstr;
-
- for (i=0; i < MAXPARAMS; i++)
- {
- params[i] = pos;
- pos = strstr(pos,nvsep);
- if (pos != NULL) {
- *pos = '\0';
- pos++;
- } else {
- params[i]=NULL;
- break;
- }
- /* Value */
- i++;
- params[i]=pos;
- pos = strstr(pos,itsep);
- if (pos != NULL) {
- *pos = '\0';
- pos++;
- } else {
- break;
- }
-
- }
- if (i < MAXPARAMS)
- {
- params[i+1]=NULL;
- }
+ char *pos;
+ char *pstr;
+
+ int i;
+ char *nvsep = "=";
+ char *itsep = ",";
+
+ pstr = GET_STR(paramstr);
+
+ pos = pstr;
+
+ for (i = 0; i < MAXPARAMS; i++)
+ {
+ params[i] = pos;
+ pos = strstr(pos, nvsep);
+ if (pos != NULL)
+ {
+ *pos = '\0';
+ pos++;
+ }
+ else
+ {
+ params[i] = NULL;
+ break;
+ }
+ /* Value */
+ i++;
+ params[i] = pos;
+ pos = strstr(pos, itsep);
+ if (pos != NULL)
+ {
+ *pos = '\0';
+ pos++;
+ }
+ else
+ break;
+
+ }
+ if (i < MAXPARAMS)
+ params[i + 1] = NULL;
}
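
The array parse_params() builds for xsltApplyStylesheet() is a NULL-terminated sequence of alternating name and value pointers, carved out of the parameter string in place. A small standalone sketch of the same split (a hypothetical helper, not the static function above) shows the resulting layout:

#include <stdio.h>
#include <string.h>

#define MAXPARAMS 20

static void
split_params(const char **params, char *pstr)
{
	char	   *pos = pstr;
	int			i;

	for (i = 0; i < MAXPARAMS; i++)
	{
		params[i] = pos;		/* name */
		pos = strstr(pos, "=");
		if (pos == NULL)
		{
			params[i] = NULL;
			break;
		}
		*pos++ = '\0';

		params[++i] = pos;		/* value */
		pos = strstr(pos, ",");
		if (pos == NULL)
			break;
		*pos++ = '\0';
	}
	if (i < MAXPARAMS)
		params[i + 1] = NULL;	/* terminate the list */
}

int
main(void)
{
	char		buf[] = "n1='v1',n2='v2'";
	const char *params[MAXPARAMS + 1];
	int			i;

	split_params(params, buf);
	/* prints "n1 = 'v1'" and "n2 = 'v2'" */
	for (i = 0; params[i] != NULL; i += 2)
		printf("%s = %s\n", params[i], params[i + 1]);
	return 0;
}
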
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.93 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.94 2004/08/29 05:06:39 momjian Exp $
*
* NOTES
* The old interface functions have been converted to macros
break;
/*
- * If the attribute number is 0, then we are supposed to return
- * the entire tuple as a row-type Datum. (Using zero for this
- * purpose is unclean since it risks confusion with "invalid attr"
- * result codes, but it's not worth changing now.)
+ * If the attribute number is 0, then we are supposed to
+ * return the entire tuple as a row-type Datum. (Using zero
+ * for this purpose is unclean since it risks confusion with
+ * "invalid attr" result codes, but it's not worth changing
+ * now.)
*
- * We have to make a copy of the tuple so we can safely insert the
- * Datum overhead fields, which are not set in on-disk tuples.
+ * We have to make a copy of the tuple so we can safely insert
+ * the Datum overhead fields, which are not set in on-disk
+ * tuples.
*/
case InvalidAttrNumber:
{
- HeapTupleHeader dtup;
+ HeapTupleHeader dtup;
dtup = (HeapTupleHeader) palloc(tup->t_len);
memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
* construct a tuple from the given values[] and nulls[] arrays
*
* Null attributes are indicated by a 'n' in the appropriate byte
- * of nulls[]. Non-null attributes are indicated by a ' ' (space).
+ * of nulls[]. Non-null attributes are indicated by a ' ' (space).
* ----------------
*/
HeapTuple
/*
* Check for nulls and embedded tuples; expand any toasted attributes
- * in embedded tuples. This preserves the invariant that toasting can
+ * in embedded tuples. This preserves the invariant that toasting can
* only go one level deep.
*
* We can skip calling toast_flatten_tuple_attribute() if the attribute
len += ComputeDataSize(tupleDescriptor, values, nulls);
/*
- * Allocate and zero the space needed. Note that the tuple body and
+ * Allocate and zero the space needed. Note that the tuple body and
* HeapTupleData management structure are allocated in one chunk.
*/
tuple = (HeapTuple) palloc0(HEAPTUPLESIZE + len);
* allocate and fill values and nulls arrays from either the tuple or
* the repl information, as appropriate.
*
- * NOTE: it's debatable whether to use heap_deformtuple() here or
- * just heap_getattr() only the non-replaced colums. The latter could
- * win if there are many replaced columns and few non-replaced ones.
+ * NOTE: it's debatable whether to use heap_deformtuple() here or just
+	 * heap_getattr() only the non-replaced columns. The latter could win
+ * if there are many replaced columns and few non-replaced ones.
* However, heap_deformtuple costs only O(N) while the heap_getattr
* way would cost O(N^2) if there are many non-replaced columns, so it
* seems better to err on the side of linear cost.
bool slow = false; /* can we use/set attcacheoff? */
natts = tup->t_natts;
+
/*
- * In inheritance situations, it is possible that the given tuple actually
- * has more fields than the caller is expecting. Don't run off the end
- * of the caller's arrays.
+ * In inheritance situations, it is possible that the given tuple
+ * actually has more fields than the caller is expecting. Don't run
+ * off the end of the caller's arrays.
*/
natts = Min(natts, tdesc_natts);
nulls[attnum] = ' ';
if (!slow && att[attnum]->attcacheoff >= 0)
- {
off = att[attnum]->attcacheoff;
- }
else
{
off = att_align(off, att[attnum]->attalign);
}
/*
- * If tuple doesn't have all the atts indicated by tupleDesc, read
- * the rest as null
+ * If tuple doesn't have all the atts indicated by tupleDesc, read the
+ * rest as null
*/
for (; attnum < tdesc_natts; attnum++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.70 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/indextuple.c,v 1.71 2004/08/29 05:06:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if ((size & INDEX_SIZE_MASK) != size)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("index row requires %lu bytes, maximum size is %lu",
- (unsigned long) size,
- (unsigned long) INDEX_SIZE_MASK)));
+ errmsg("index row requires %lu bytes, maximum size is %lu",
+ (unsigned long) size,
+ (unsigned long) INDEX_SIZE_MASK)));
infomask |= size;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.84 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/printtup.c,v 1.85 2004/08/29 05:06:39 momjian Exp $
*
*-------------------------------------------------------------------------
*/
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typioparam),
+ ObjectIdGetDatum(thisState->typioparam),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false);
pfree(outputstr);
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typioparam)));
+ ObjectIdGetDatum(thisState->typioparam)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
outputstr = DatumGetCString(FunctionCall3(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typioparam),
+ ObjectIdGetDatum(thisState->typioparam),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), true);
pfree(outputstr);
value = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(typeinfo->attrs[i]->atttypmod)));
printatt((unsigned) i + 1, typeinfo->attrs[i], value);
outputbytes = DatumGetByteaP(FunctionCall2(&thisState->finfo,
attr,
- ObjectIdGetDatum(thisState->typioparam)));
+ ObjectIdGetDatum(thisState->typioparam)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.105 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.106 2004/08/29 05:06:39 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
/*
* Allocate enough memory for the tuple descriptor, and zero the
- * attrs[] array since TupleDescInitEntry assumes that the array
- * is filled with NULL pointers.
+ * attrs[] array since TupleDescInitEntry assumes that the array is
+ * filled with NULL pointers.
*/
desc = (TupleDesc) palloc(sizeof(struct tupleDesc));
/*
* Note: attributeName can be NULL, because the planner doesn't always
- * fill in valid resname values in targetlists, particularly for resjunk
- * attributes.
+ * fill in valid resname values in targetlists, particularly for
+ * resjunk attributes.
*/
if (attributeName != NULL)
namestrcpy(&(att->attname), attributeName);
* Given a relation schema (list of ColumnDef nodes), build a TupleDesc.
*
* Note: the default assumption is no OIDs; caller may modify the returned
- * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
+ * TupleDesc if it wants OIDs. Also, tdtypeid will need to be filled in
* later on.
*/
TupleDesc
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.110 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gist.c,v 1.111 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum attr[INDEX_MAX_KEYS];
bool whatfree[INDEX_MAX_KEYS];
char isnull[INDEX_MAX_KEYS];
- GistEntryVector *evec;
+ GistEntryVector *evec;
Datum datum;
int datumsize,
i,
{
evec->n = 2;
gistentryinit(evec->vector[1],
- evec->vector[0].key, r, NULL,
- (OffsetNumber) 0, evec->vector[0].bytes, FALSE);
+ evec->vector[0].key, r, NULL,
+ (OffsetNumber) 0, evec->vector[0].bytes, FALSE);
}
else
static IndexTuple
gistgetadjusted(Relation r, IndexTuple oldtup, IndexTuple addtup, GISTSTATE *giststate)
{
- GistEntryVector *evec;
+ GistEntryVector *evec;
Datum datum;
int datumsize;
bool result,
int len,
*attrsize;
OffsetNumber *entries;
- GistEntryVector *evec;
+ GistEntryVector *evec;
Datum datum;
int datumsize;
int reallen;
else
{
/*
- * evec->vector[0].bytes may be not
- * defined, so form union with itself
+ * evec->vector[0].bytes may be not defined, so form union
+ * with itself
*/
if (reallen == 1)
{
*ev1p;
float lpenalty,
rpenalty;
- GistEntryVector *evec;
+ GistEntryVector *evec;
int datumsize;
bool isnull[INDEX_MAX_KEYS];
int i,
rbknum;
GISTPageOpaque opaque;
GIST_SPLITVEC v;
- GistEntryVector *entryvec;
+ GistEntryVector *entryvec;
bool *decompvec;
int i,
j,
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.41 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.42 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
FALSE, isNull);
/*
- * Call the Consistent function to evaluate the test. The arguments
- * are the index datum (as a GISTENTRY*), the comparison datum, and
- * the comparison operator's strategy number and subtype from pg_amop.
+ * Call the Consistent function to evaluate the test. The
+ * arguments are the index datum (as a GISTENTRY*), the comparison
+ * datum, and the comparison operator's strategy number and
+ * subtype from pg_amop.
*
* (Presently there's no need to pass the subtype since it'll always
* be zero, but might as well pass it for possible future use.)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.54 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/gist/gistscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* the sk_subtype field.
*/
for (i = 0; i < s->numberOfKeys; i++)
- {
s->keyData[i].sk_func = p->giststate->consistentFn[s->keyData[i].sk_attno - 1];
- }
}
PG_RETURN_VOID();
GISTScanList next;
/*
- * Note: this should be a no-op during normal query shutdown.
- * However, in an abort situation ExecutorEnd is not called and so
- * there may be open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However,
+ * in an abort situation ExecutorEnd is not called and so there may be
+ * open index scans to clean up.
*/
prev = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.72 2004/08/29 04:12:17 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.73 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* This file contains only the public interface routines.
bool res;
/*
- * We hold pin but not lock on current buffer while outside the hash AM.
- * Reacquire the read lock here.
+ * We hold pin but not lock on current buffer while outside the hash
+ * AM. Reacquire the read lock here.
*/
if (BufferIsValid(so->hashso_curbuf))
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
/*
* Read the metapage to fetch original bucket and tuple counts. Also,
* we keep a copy of the last-seen metapage so that we can use its
- * hashm_spares[] values to compute bucket page addresses. This is a
+ * hashm_spares[] values to compute bucket page addresses. This is a
* bit hokey but perfectly safe, since the interesting entries in the
* spares array cannot change under us; and it beats rereading the
* metapage for each bucket.
ItemPointer htup;
hitem = (HashItem) PageGetItem(page,
- PageGetItemId(page, offno));
+ PageGetItemId(page, offno));
htup = &(hitem->hash_itup.t_tid);
if (callback(htup, callback_state))
{
orig_ntuples == metap->hashm_ntuples)
{
/*
- * No one has split or inserted anything since start of scan,
- * so believe our count as gospel.
+ * No one has split or inserted anything since start of scan, so
+ * believe our count as gospel.
*/
metap->hashm_ntuples = num_index_tuples;
}
{
/*
* Otherwise, our count is untrustworthy since we may have
- * double-scanned tuples in split buckets. Proceed by
+ * double-scanned tuples in split buckets. Proceed by
* dead-reckoning.
*/
if (metap->hashm_ntuples > tuples_removed)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.33 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashinsert.c,v 1.34 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static OffsetNumber _hash_pgaddtup(Relation rel, Buffer buf,
- Size itemsize, HashItem hitem);
+ Size itemsize, HashItem hitem);
/*
/*
* Check whether the item can fit on a hash page at all. (Eventually,
- * we ought to try to apply TOAST methods if not.) Note that at this
+ * we ought to try to apply TOAST methods if not.) Note that at this
* point, itemsz doesn't include the ItemId.
*/
if (itemsz > HashMaxItemSize((Page) metap))
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
/*
- * Acquire share lock on target bucket; then we can release split lock.
+ * Acquire share lock on target bucket; then we can release split
+ * lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
/*
* no space on this page; check for an overflow page
*/
- BlockNumber nextblkno = pageopaque->hasho_nextblkno;
+ BlockNumber nextblkno = pageopaque->hasho_nextblkno;
if (BlockNumberIsValid(nextblkno))
{
_hash_droplock(rel, blkno, HASH_SHARE);
/*
- * Write-lock the metapage so we can increment the tuple count.
- * After incrementing it, check to see if it's time for a split.
+ * Write-lock the metapage so we can increment the tuple count. After
+ * incrementing it, check to see if it's time for a split.
*/
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.43 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.44 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
for (i = 1;
i < splitnum && ovflbitnum > metap->hashm_spares[i];
i++)
- /* loop */ ;
+ /* loop */ ;
/*
- * Convert to absolute page number by adding the number of bucket pages
- * that exist before this split point.
+ * Convert to absolute page number by adding the number of bucket
+ * pages that exist before this split point.
*/
return (BlockNumber) ((1 << i) + ovflbitnum);
}
*
* Add an overflow page to the bucket whose last page is pointed to by 'buf'.
*
- * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
+ * On entry, the caller must hold a pin but no lock on 'buf'. The pin is
* dropped before exiting (we assume the caller is not interested in 'buf'
* anymore). The returned overflow page will be pinned and write-locked;
* it is guaranteed to be empty.
* That buffer is returned in the same state.
*
* The caller must hold at least share lock on the bucket, to ensure that
- * no one else tries to compact the bucket meanwhile. This guarantees that
+ * no one else tries to compact the bucket meanwhile. This guarantees that
* 'buf' won't stop being part of the bucket while it's unlocked.
*
* NB: since this could be executed concurrently by multiple processes,
* one should not assume that the returned overflow page will be the
- * immediate successor of the originally passed 'buf'. Additional overflow
+ * immediate successor of the originally passed 'buf'. Additional overflow
* pages might have been added to the bucket chain in between.
*/
Buffer
/* outer loop iterates once per bitmap page */
for (;;)
{
- BlockNumber mapblkno;
+ BlockNumber mapblkno;
Page mappage;
uint32 last_inpage;
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't
- * risk changing it if someone moved it while we were searching
- * bitmap pages.
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * changing it if someone moved it while we were searching bitmap
+ * pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
metap->hashm_firstfree = bit + 1;
blkno = bitno_to_blkno(metap, bit);
/*
- * Adjust hashm_firstfree to avoid redundant searches. But don't
- * risk changing it if someone moved it while we were searching
- * bitmap pages.
+ * Adjust hashm_firstfree to avoid redundant searches. But don't risk
+ * changing it if someone moved it while we were searching bitmap
+ * pages.
*/
if (metap->hashm_firstfree == orig_firstfree)
{
Bucket bucket;
/* Get information from the doomed page */
- ovflblkno = BufferGetBlockNumber(ovflbuf);
+ ovflblkno = BufferGetBlockNumber(ovflbuf);
ovflpage = BufferGetPage(ovflbuf);
_hash_checkpage(rel, ovflpage, LH_OVERFLOW_PAGE);
ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
/*
* Fix up the bucket chain. this is a doubly-linked list, so we must
* fix up the bucket chain members behind and ahead of the overflow
- * page being deleted. No concurrency issues since we hold exclusive
+ * page being deleted. No concurrency issues since we hold exclusive
* lock on the entire bucket.
*/
if (BlockNumberIsValid(prevblkno))
/*
* It is okay to write-lock the new bitmap page while holding metapage
- * write lock, because no one else could be contending for the new page.
+ * write lock, because no one else could be contending for the new
+ * page.
*
* There is some loss of concurrency in possibly doing I/O for the new
* page while holding the metapage lock, but this path is taken so
/*
* delete the tuple from the "read" page. PageIndexTupleDelete
- * repacks the ItemId array, so 'roffnum' will be "advanced" to
- * the "next" ItemId.
+ * repacks the ItemId array, so 'roffnum' will be "advanced"
+ * to the "next" ItemId.
*/
PageIndexTupleDelete(rpage, roffnum);
}
* Tricky point here: if our read and write pages are adjacent in the
* bucket chain, our write lock on wbuf will conflict with
* _hash_freeovflpage's attempt to update the sibling links of the
- * removed page. However, in that case we are done anyway, so we can
- * simply drop the write lock before calling _hash_freeovflpage.
+ * removed page. However, in that case we are done anyway, so we
+ * can simply drop the write lock before calling
+ * _hash_freeovflpage.
*/
if (PageIsEmpty(rpage))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.45 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.46 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
static void _hash_splitbucket(Relation rel, Buffer metabuf,
- Bucket obucket, Bucket nbucket,
- BlockNumber start_oblkno,
- BlockNumber start_nblkno,
- uint32 maxbucket,
- uint32 highmask, uint32 lowmask);
+ Bucket obucket, Bucket nbucket,
+ BlockNumber start_oblkno,
+ BlockNumber start_nblkno,
+ uint32 maxbucket,
+ uint32 highmask, uint32 lowmask);
/*
* of the locking rules). However, we can skip taking lmgr locks when the
* index is local to the current backend (ie, either temp or new in the
* current transaction). No one else can see it, so there's no reason to
- * take locks. We still take buffer-level locks, but not lmgr locks.
+ * take locks. We still take buffer-level locks, but not lmgr locks.
*/
#define USELOCKING(rel) (!RELATION_IS_LOCAL(rel))
RelationGetRelationName(rel));
/*
- * Determine the target fill factor (tuples per bucket) for this index.
- * The idea is to make the fill factor correspond to pages about 3/4ths
- * full. We can compute it exactly if the index datatype is fixed-width,
- * but for var-width there's some guessing involved.
+ * Determine the target fill factor (tuples per bucket) for this
+ * index. The idea is to make the fill factor correspond to pages
+ * about 3/4ths full. We can compute it exactly if the index datatype
+ * is fixed-width, but for var-width there's some guessing involved.
*/
data_width = get_typavgwidth(RelationGetDescr(rel)->attrs[0]->atttypid,
- RelationGetDescr(rel)->attrs[0]->atttypmod);
+ RelationGetDescr(rel)->attrs[0]->atttypmod);
item_width = MAXALIGN(sizeof(HashItemData)) + MAXALIGN(data_width) +
sizeof(ItemIdData); /* include the line pointer */
ffactor = (BLCKSZ * 3 / 4) / item_width;
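	/*
	 * Worked example with illustrative numbers (an assumption, not part of
	 * the patch): with BLCKSZ = 8192, MAXALIGN(sizeof(HashItemData)) = 8,
	 * MAXALIGN(data_width) = 8 for a 4-byte fixed-width key, and
	 * sizeof(ItemIdData) = 4, item_width = 8 + 8 + 4 = 20, so
	 * ffactor = (8192 * 3 / 4) / 20 = 307 tuples per bucket page.
	 */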
metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
/*
- * We initialize the index with two buckets, 0 and 1, occupying physical
- * blocks 1 and 2. The first freespace bitmap page is in block 3.
+ * We initialize the index with two buckets, 0 and 1, occupying
+ * physical blocks 1 and 2. The first freespace bitmap page is in
+ * block 3.
*/
metap->hashm_maxbucket = metap->hashm_lowmask = 1; /* nbuckets - 1 */
metap->hashm_highmask = 3; /* (nbuckets << 1) - 1 */
MemSet((char *) metap->hashm_spares, 0, sizeof(metap->hashm_spares));
MemSet((char *) metap->hashm_mapp, 0, sizeof(metap->hashm_mapp));
- metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
+ metap->hashm_spares[1] = 1; /* the first bitmap page is only spare */
metap->hashm_ovflpoint = 1;
metap->hashm_firstfree = 0;
}
/*
- * Initialize first bitmap page. Can't do this until we
- * create the first two buckets, else smgr will complain.
+ * Initialize first bitmap page. Can't do this until we create the
+ * first two buckets, else smgr will complain.
*/
_hash_initbitmap(rel, metap, 3);
uint32 lowmask;
/*
- * Obtain the page-zero lock to assert the right to begin a split
- * (see README).
+ * Obtain the page-zero lock to assert the right to begin a split (see
+ * README).
*
* Note: deadlock should be impossible here. Our own backend could only
- * be holding bucket sharelocks due to stopped indexscans; those will not
- * block other holders of the page-zero lock, who are only interested in
- * acquiring bucket sharelocks themselves. Exclusive bucket locks are
- * only taken here and in hashbulkdelete, and neither of these operations
- * needs any additional locks to complete. (If, due to some flaw in this
- * reasoning, we manage to deadlock anyway, it's okay to error out; the
- * index will be left in a consistent state.)
+ * be holding bucket sharelocks due to stopped indexscans; those will
+ * not block other holders of the page-zero lock, who are only
+ * interested in acquiring bucket sharelocks themselves. Exclusive
+ * bucket locks are only taken here and in hashbulkdelete, and neither
+ * of these operations needs any additional locks to complete. (If,
+ * due to some flaw in this reasoning, we manage to deadlock anyway,
+ * it's okay to error out; the index will be left in a consistent
+ * state.)
*/
_hash_getlock(rel, 0, HASH_EXCLUSIVE);
_hash_checkpage(rel, (Page) metap, LH_META_PAGE);
/*
- * Check to see if split is still needed; someone else might have already
- * done one while we waited for the lock.
+ * Check to see if split is still needed; someone else might have
+ * already done one while we waited for the lock.
*
	 * Make sure this stays in sync with _hash_doinsert()
*/
/*
* Determine which bucket is to be split, and attempt to lock the old
- * bucket. If we can't get the lock, give up.
+ * bucket. If we can't get the lock, give up.
*
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
- * Ideally we would lock the new bucket too before proceeding, but if
- * we are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
+ * Ideally we would lock the new bucket too before proceeding, but if we
+ * are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping
* isn't correct yet. For simplicity we update the metapage first and
- * then lock. This should be okay because no one else should be trying
- * to lock the new bucket yet...
+ * then lock. This should be okay because no one else should be
+ * trying to lock the new bucket yet...
*/
new_bucket = metap->hashm_maxbucket + 1;
old_bucket = (new_bucket & metap->hashm_lowmask);
goto fail;
/*
- * Okay to proceed with split. Update the metapage bucket mapping info.
+ * Okay to proceed with split. Update the metapage bucket mapping
+ * info.
*/
metap->hashm_maxbucket = new_bucket;
/*
* If the split point is increasing (hashm_maxbucket's log base 2
* increases), we need to adjust the hashm_spares[] array and
- * hashm_ovflpoint so that future overflow pages will be created beyond
- * this new batch of bucket pages.
+ * hashm_ovflpoint so that future overflow pages will be created
+ * beyond this new batch of bucket pages.
*
- * XXX should initialize new bucket pages to prevent out-of-order
- * page creation? Don't wanna do it right here though.
+ * XXX should initialize new bucket pages to prevent out-of-order page
+ * creation? Don't wanna do it right here though.
*/
spare_ndx = _hash_log2(metap->hashm_maxbucket + 1);
if (spare_ndx > metap->hashm_ovflpoint)
/*
* Copy bucket mapping info now; this saves re-accessing the meta page
* inside _hash_splitbucket's inner loop. Note that once we drop the
- * split lock, other splits could begin, so these values might be out of
- * date before _hash_splitbucket finishes. That's okay, since all it
- * needs is to tell which of these two buckets to map hashkeys into.
+ * split lock, other splits could begin, so these values might be out
+ * of date before _hash_splitbucket finishes. That's okay, since all
+ * it needs is to tell which of these two buckets to map hashkeys
+ * into.
*/
maxbucket = metap->hashm_maxbucket;
highmask = metap->hashm_highmask;
/*
* It should be okay to simultaneously write-lock pages from each
- * bucket, since no one else can be trying to acquire buffer lock
- * on pages of either bucket.
+ * bucket, since no one else can be trying to acquire buffer lock on
+ * pages of either bucket.
*/
oblkno = start_oblkno;
nblkno = start_nblkno;
nopaque->hasho_filler = HASHO_FILL;
/*
- * Partition the tuples in the old bucket between the old bucket and the
- * new bucket, advancing along the old bucket's overflow bucket chain
- * and adding overflow pages to the new bucket as needed.
+ * Partition the tuples in the old bucket between the old bucket and
+ * the new bucket, advancing along the old bucket's overflow bucket
+ * chain and adding overflow pages to the new bucket as needed.
*/
ooffnum = FirstOffsetNumber;
omaxoffnum = PageGetMaxOffsetNumber(opage);
oblkno = oopaque->hasho_nextblkno;
if (!BlockNumberIsValid(oblkno))
break;
+
/*
- * we ran out of tuples on this particular page, but we
- * have more overflow pages; advance to next page.
+ * we ran out of tuples on this particular page, but we have
+ * more overflow pages; advance to next page.
*/
_hash_wrtbuf(rel, obuf);
/*
* Re-hash the tuple to determine which bucket it now belongs in.
*
- * It is annoying to call the hash function while holding locks,
- * but releasing and relocking the page for each tuple is unappealing
+ * It is annoying to call the hash function while holding locks, but
+ * releasing and relocking the page for each tuple is unappealing
* too.
*/
hitem = (HashItem) PageGetItem(opage, PageGetItemId(opage, ooffnum));
}
/*
- * We're at the end of the old bucket chain, so we're done partitioning
- * the tuples. Before quitting, call _hash_squeezebucket to ensure the
- * tuples remaining in the old bucket (including the overflow pages) are
- * packed as tightly as possible. The new bucket is already tight.
+ * We're at the end of the old bucket chain, so we're done
+ * partitioning the tuples. Before quitting, call _hash_squeezebucket
+ * to ensure the tuples remaining in the old bucket (including the
+ * overflow pages) are packed as tightly as possible. The new bucket
+ * is already tight.
*/
_hash_wrtbuf(rel, obuf);
_hash_wrtbuf(rel, nbuf);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashscan.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
HashScanList next;
/*
- * Note: this should be a no-op during normal query shutdown.
- * However, in an abort situation ExecutorEnd is not called and so
- * there may be open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However,
+ * in an abort situation ExecutorEnd is not called and so there may be
+ * open index scans to clean up.
*/
prev = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.36 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashsearch.c,v 1.37 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* We do not support hash scans with no index qualification, because
* we would have to read the whole index rather than just one bucket.
* That creates a whole raft of problems, since we haven't got a
- * practical way to lock all the buckets against splits or compactions.
+ * practical way to lock all the buckets against splits or
+ * compactions.
*/
if (scan->numberOfKeys < 1)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("hash indexes do not support whole-index scans")));
+ errmsg("hash indexes do not support whole-index scans")));
/*
* If the constant in the index qual is NULL, assume it cannot match
_hash_relbuf(rel, metabuf);
/*
- * Acquire share lock on target bucket; then we can release split lock.
+ * Acquire share lock on target bucket; then we can release split
+ * lock.
*/
_hash_getlock(rel, blkno, HASH_SHARE);
while (offnum > maxoff)
{
/*
- * either this page is empty
- * (maxoff == InvalidOffsetNumber)
- * or we ran off the end.
+ * either this page is empty (maxoff ==
+ * InvalidOffsetNumber) or we ran off the end.
*/
_hash_readnext(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
while (offnum < FirstOffsetNumber)
{
/*
- * either this page is empty
- * (offnum == InvalidOffsetNumber)
- * or we ran off the end.
+ * either this page is empty (offnum ==
+ * InvalidOffsetNumber) or we ran off the end.
*/
_hash_readprev(rel, &buf, &page, &opaque);
if (BufferIsValid(buf))
- {
maxoff = offnum = PageGetMaxOffsetNumber(page);
- }
else
{
/* end of bucket */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.39 2004/08/29 04:12:18 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.40 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
_hash_checkpage(Relation rel, Page page, int flags)
{
Assert(page);
+
/*
* When checking the metapage, always verify magic number and version.
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.172 2004/08/29 04:12:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.173 2004/08/29 05:06:40 momjian Exp $
*
*
* INTERFACE ROUTINES
/*
* Determine the number of blocks we have to scan.
*
- * It is sufficient to do this once at scan start, since any tuples
- * added while the scan is in progress will be invisible to my
- * transaction anyway...
+ * It is sufficient to do this once at scan start, since any tuples added
+ * while the scan is in progress will be invisible to my transaction
+ * anyway...
*/
scan->rs_nblocks = RelationGetNumberOfBlocks(scan->rs_rd);
tup->t_data->t_infomask |= HEAP_XMAX_INVALID;
HeapTupleHeaderSetXmin(tup->t_data, GetCurrentTransactionId());
HeapTupleHeaderSetCmin(tup->t_data, cid);
- HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */
+ HeapTupleHeaderSetCmax(tup->t_data, 0); /* zero out Datum fields */
tup->t_tableOid = relation->rd_id;
/*
* If the new tuple is too big for storage or contains already toasted
- * out-of-line attributes from some other relation, invoke the toaster.
+ * out-of-line attributes from some other relation, invoke the
+ * toaster.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
*/
int
heap_delete(Relation relation, ItemPointer tid,
- ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
+ ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
{
ItemId lp;
HeapTupleData tp;
/*
* If the tuple has toasted out-of-line attributes, we need to delete
- * those items too. We have to do this before WriteBuffer because we need
- * to look at the contents of the tuple, but it's OK to release the
- * context lock on the buffer first.
+ * those items too. We have to do this before WriteBuffer because we
+ * need to look at the contents of the tuple, but it's OK to release
+ * the context lock on the buffer first.
*/
if (HeapTupleHasExternal(&tp))
heap_tuple_toast_attrs(relation, NULL, &tp);
result = heap_delete(relation, tid,
&ctid,
GetCurrentCommandId(), SnapshotAny,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
*/
int
heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
- ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
+ ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait)
{
ItemId lp;
HeapTupleData oldtup;
result = heap_update(relation, otid, tup,
&ctid,
GetCurrentCommandId(), SnapshotAny,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
Page page;
/*
- * Note: the NEWPAGE log record is used for both heaps and indexes,
- * so do not do anything that assumes we are touching a heap.
+ * Note: the NEWPAGE log record is used for both heaps and indexes, so
+ * do not do anything that assumes we are touching a heap.
*/
if (!redo || (record->xl_info & XLR_BKP_BLOCK_1))
out_target(char *buf, xl_heaptid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.44 2004/08/29 04:12:20 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.45 2004/08/29 05:06:40 momjian Exp $
*
*
* INTERFACE ROUTINES
/*
* Get the tuple descriptor and break down the tuple into fields.
*
- * NOTE: it's debatable whether to use heap_deformtuple() here or
- * just heap_getattr() only the varlena columns. The latter could
- * win if there are few varlena columns and many non-varlena ones.
- * However, heap_deformtuple costs only O(N) while the heap_getattr
- * way would cost O(N^2) if there are many varlena columns, so it
- * seems better to err on the side of linear cost. (We won't even
- * be here unless there's at least one varlena column, by the way.)
+ * NOTE: it's debatable whether to use heap_deformtuple() here or just
+ * heap_getattr() only the varlena columns. The latter could win if
+ * there are few varlena columns and many non-varlena ones. However,
+ * heap_deformtuple costs only O(N) while the heap_getattr way would
+ * cost O(N^2) if there are many varlena columns, so it seems better
+ * to err on the side of linear cost. (We won't even be here unless
+ * there's at least one varlena column, by the way.)
*/
tupleDesc = rel->rd_att;
att = tupleDesc->attrs;
{
if (att[i]->attlen == -1)
{
- Datum value = toast_values[i];
+ Datum value = toast_values[i];
if (toast_nulls[i] != 'n' && VARATT_IS_EXTERNAL(value))
toast_delete_datum(rel, value);
*
* If a Datum is of composite type, "flatten" it to contain no toasted fields.
* This must be invoked on any potentially-composite field that is to be
- * inserted into a tuple. Doing this preserves the invariant that toasting
+ * inserted into a tuple. Doing this preserves the invariant that toasting
* goes only one level deep in a tuple.
* ----------
*/
ScanKeyInit(&toastkey,
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Find the chunks by index
ScanKeyInit(&toastkey,
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Read the chunks by index
ScanKeyInit(&toastkey[0],
(AttrNumber) 1,
BTEqualStrategyNumber, F_OIDEQ,
- ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
+ ObjectIdGetDatum(attr->va_content.va_external.va_valueid));
/*
* Use equality condition for one chunk, a range condition otherwise:
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.115 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.116 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* We can skip items that are marked killed.
*
* Formerly, we applied _bt_isequal() before checking the kill
- * flag, so as to fall out of the item loop as soon as possible.
- * However, in the presence of heavy update activity an index
- * may contain many killed items with the same key; running
- * _bt_isequal() on each killed item gets expensive. Furthermore
- * it is likely that the non-killed version of each key appears
- * first, so that we didn't actually get to exit any sooner anyway.
- * So now we just advance over killed items as quickly as we can.
- * We only apply _bt_isequal() when we get to a non-killed item or
- * the end of the page.
+ * flag, so as to fall out of the item loop as soon as
+ * possible. However, in the presence of heavy update activity
+ * an index may contain many killed items with the same key;
+ * running _bt_isequal() on each killed item gets expensive.
+ * Furthermore it is likely that the non-killed version of
+ * each key appears first, so that we didn't actually get to
+ * exit any sooner anyway. So now we just advance over killed
+ * items as quickly as we can. We only apply _bt_isequal()
+ * when we get to a non-killed item or the end of the page.
*/
if (!ItemIdDeleted(curitemid))
{
/*
- * _bt_compare returns 0 for (1,NULL) and (1,NULL) - this's
- * how we handling NULLs - and so we must not use _bt_compare
- * in real comparison, but only for ordering/finding items on
- * pages. - vadim 03/24/97
+ * _bt_compare returns 0 for (1,NULL) and (1,NULL) -
+					 * this is how we handle NULLs - and so we must not use
+ * _bt_compare in real comparison, but only for
+ * ordering/finding items on pages. - vadim 03/24/97
*/
if (!_bt_isequal(itupdesc, page, offset, natts, itup_scankey))
- break; /* we're past all the equal tuples */
+ break; /* we're past all the equal tuples */
/* okay, we gotta fetch the heap tuple ... */
cbti = (BTItem) PageGetItem(page, curitemid);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.79 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.80 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
rootlevel = metad->btm_fastlevel;
/*
- * We are done with the metapage; arrange to release it via
- * first _bt_relandgetbuf call
+ * We are done with the metapage; arrange to release it via first
+ * _bt_relandgetbuf call
*/
rootbuf = metabuf;
rootlevel = metad->btm_level;
/*
- * We are done with the metapage; arrange to release it via
- * first _bt_relandgetbuf call
+ * We are done with the metapage; arrange to release it via first
+ * _bt_relandgetbuf call
*/
rootbuf = metabuf;
* page could have been re-used between the time the last VACUUM
* scanned it and the time the VACUUM made its FSM updates.)
*
- * In fact, it's worse than that: we can't even assume that it's
- * safe to take a lock on the reported page. If somebody else
- * has a lock on it, or even worse our own caller does, we could
+ * In fact, it's worse than that: we can't even assume that it's safe
+ * to take a lock on the reported page. If somebody else has a
+ * lock on it, or even worse our own caller does, we could
* deadlock. (The own-caller scenario is actually not improbable.
* Consider an index on a serial or timestamp column. Nearly all
* splits will be at the rightmost page, so it's entirely likely
- * that _bt_split will call us while holding a lock on the page most
- * recently acquired from FSM. A VACUUM running concurrently with
- * the previous split could well have placed that page back in FSM.)
+ * that _bt_split will call us while holding a lock on the page
+ * most recently acquired from FSM. A VACUUM running concurrently
+ * with the previous split could well have placed that page back
+ * in FSM.)
*
* To get around that, we ask for only a conditional lock on the
- * reported page. If we fail, then someone else is using the page,
- * and we may reasonably assume it's not free. (If we happen to be
- * wrong, the worst consequence is the page will be lost to use till
- * the next VACUUM, which is no big problem.)
+ * reported page. If we fail, then someone else is using the
+ * page, and we may reasonably assume it's not free. (If we
+ * happen to be wrong, the worst consequence is the page will be
+ * lost to use till the next VACUUM, which is no big problem.)
*/
for (;;)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.88 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.89 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
opaque = (BTPageOpaque) PageGetSpecialPointer(page);
/*
- * When nextkey = false (normal case): if the scan key that brought us to
- * this page is > the high key stored on the page, then the page has split
- * and we need to move right. (If the scan key is equal to the high key,
- * we might or might not need to move right; have to scan the page first
- * anyway.)
+ * When nextkey = false (normal case): if the scan key that brought us
+ * to this page is > the high key stored on the page, then the page
+ * has split and we need to move right. (If the scan key is equal to
+ * the high key, we might or might not need to move right; have to
+ * scan the page first anyway.)
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
- * The page could even have split more than once, so scan as far as needed.
+ * The page could even have split more than once, so scan as far as
+ * needed.
*
* We also have to move right if we followed a link that brought us to a
* dead page.
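/*
 * Illustrative sketch only (toy structures, not the real _bt_moveright):
 * the rule above collapses to "keep moving right while the page is dead,
 * or while the scan key compares greater (or greater-or-equal, when
 * nextkey) than the page's high key".  A rightmost page (rightlink < 0)
 * ends the walk.
 */
#include <stdbool.h>

typedef struct ToyPage
{
	int		highkey;		/* upper bound for keys on this page */
	bool	dead;			/* page was deleted; must keep moving */
	int		rightlink;		/* index of right sibling, or -1 if rightmost */
} ToyPage;

static int
toy_moveright(const ToyPage *pages, int blk, int scankey, bool nextkey)
{
	while (pages[blk].rightlink >= 0 &&
		   (pages[blk].dead ||
			(nextkey ? scankey >= pages[blk].highkey
					 : scankey > pages[blk].highkey)))
		blk = pages[blk].rightlink;		/* page split (or died) under us */
	return blk;
}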
* Binary search to find the first key on the page >= scan key, or
* first key > scankey when nextkey is true.
*
- * For nextkey=false (cmpval=1), the loop invariant is: all slots
- * before 'low' are < scan key, all slots at or after 'high'
- * are >= scan key.
+ * For nextkey=false (cmpval=1), the loop invariant is: all slots before
+ * 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
- * For nextkey=true (cmpval=0), the loop invariant is: all slots
- * before 'low' are <= scan key, all slots at or after 'high'
- * are > scan key.
+ * For nextkey=true (cmpval=0), the loop invariant is: all slots before
+ * 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
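/*
 * Standalone illustration of the invariants above, using a plain sorted
 * int array in place of a btree page (toy code, not _bt_binsrch itself).
 * cmpval is 1 for nextkey = false ("first slot >= key") and 0 for
 * nextkey = true ("first slot > key"); the loop body is identical either
 * way.
 */
static int
toy_binsrch(const int *slots, int nslots, int key, int nextkey)
{
	int		low = 0;
	int		high = nslots;
	int		cmpval = nextkey ? 0 : 1;	/* chosen comparison threshold */

	while (high > low)
	{
		int		mid = low + (high - low) / 2;
		int		cmp = (key > slots[mid]) - (key < slots[mid]);

		if (cmp >= cmpval)
			low = mid + 1;		/* slots before 'low' are < (<=) key */
		else
			high = mid;			/* slots at/after 'high' are >= (>) key */
	}
	return low;					/* high == low: the answer, possibly nslots */
}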
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
- * On a leaf page, we always return the first key >= scan key (resp.
- * > scan key), which could be the last slot + 1.
+ * On a leaf page, we always return the first key >= scan key (resp. >
+ * scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
/*
- * On a non-leaf page, return the last key < scan key (resp. <= scan key).
- * There must be one if _bt_compare() is playing by the rules.
+ * On a non-leaf page, return the last key < scan key (resp. <= scan
+ * key). There must be one if _bt_compare() is playing by the rules.
*/
Assert(low > P_FIRSTDATAKEY(opaque));
{
/*
* The sk_func needs to be passed the index value as left arg
- * and the sk_argument as right arg (they might be of different
- * types). Since it is convenient for callers to think of
- * _bt_compare as comparing the scankey to the index item,
- * we have to flip the sign of the comparison result.
+ * and the sk_argument as right arg (they might be of
+ * different types). Since it is convenient for callers to
+ * think of _bt_compare as comparing the scankey to the index
+ * item, we have to flip the sign of the comparison result.
*
* Note: curious-looking coding is to avoid overflow if
* comparison function returns INT_MIN. There is no risk of
bool goback;
bool continuescan;
ScanKey scankeys;
- ScanKey *startKeys = NULL;
+ ScanKey *startKeys = NULL;
int keysCount = 0;
int i;
StrategyNumber strat_total;
* We want to identify the keys that can be used as starting boundaries;
* these are =, >, or >= keys for a forward scan or =, <, <= keys for
* a backwards scan. We can use keys for multiple attributes so long as
- * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
+ * the prior attributes had only =, >= (resp. =, <=) keys. Once we accept
* a > or < boundary or find an attribute with no boundary (which can be
* thought of as the same as "> -infinity"), we can't use keys for any
* attributes to its right, because it would break our simplistic notion
ScanKey cur;
startKeys = (ScanKey *) palloc(so->numberOfKeys * sizeof(ScanKey));
+
/*
- * chosen is the so-far-chosen key for the current attribute, if any.
- * We don't cast the decision in stone until we reach keys for the
- * next attribute.
+ * chosen is the so-far-chosen key for the current attribute, if
+ * any. We don't cast the decision in stone until we reach keys
+ * for the next attribute.
*/
curattr = 1;
chosen = NULL;
+
/*
* Loop iterates from 0 to numberOfKeys inclusive; we use the last
* pass to handle after-last-key processing. Actual exit from the
if (chosen == NULL)
break;
startKeys[keysCount++] = chosen;
+
/*
- * Adjust strat_total, and quit if we have stored a > or < key.
+ * Adjust strat_total, and quit if we have stored a > or <
+ * key.
*/
strat = chosen->sk_strategy;
if (strat != BTEqualStrategyNumber)
strat == BTLessStrategyNumber)
break;
}
+
/*
* Done if that was the last attribute.
*/
if (i >= so->numberOfKeys)
break;
+
/*
* Reset for next attr, which should be in sequence.
*/
ScanKey cur = startKeys[i];
/*
- * _bt_preprocess_keys disallows it, but it's place to add some code
- * later
+ * _bt_preprocess_keys disallows it, but it's place to add some
+ * code later
*/
if (cur->sk_flags & SK_ISNULL)
{
elog(ERROR, "btree doesn't support is(not)null, yet");
return false;
}
+
/*
* If scankey operator is of default subtype, we can use the
- * cached comparison procedure; otherwise gotta look it up in
- * the catalogs.
+ * cached comparison procedure; otherwise gotta look it up in the
+ * catalogs.
*/
if (cur->sk_subtype == InvalidOid)
{
/*
* Examine the selected initial-positioning strategy to determine
- * exactly where we need to start the scan, and set flag variables
- * to control the code below.
+ * exactly where we need to start the scan, and set flag variables to
+ * control the code below.
*
- * If nextkey = false, _bt_search and _bt_binsrch will locate the
- * first item >= scan key. If nextkey = true, they will locate the
- * first item > scan key.
+ * If nextkey = false, _bt_search and _bt_binsrch will locate the first
+ * item >= scan key. If nextkey = true, they will locate the first
+ * item > scan key.
*
- * If goback = true, we will then step back one item, while if
- * goback = false, we will start the scan on the located item.
+ * If goback = true, we will then step back one item, while if goback =
+ * false, we will start the scan on the located item.
*
* it's yet other place to add some code later for is(not)null ...
*/
switch (strat_total)
{
case BTLessStrategyNumber:
+
/*
- * Find first item >= scankey, then back up one to arrive at last
- * item < scankey. (Note: this positioning strategy is only used
- * for a backward scan, so that is always the correct starting
- * position.)
+ * Find first item >= scankey, then back up one to arrive at
+ * last item < scankey. (Note: this positioning strategy is
+ * only used for a backward scan, so that is always the
+ * correct starting position.)
*/
nextkey = false;
goback = true;
break;
case BTLessEqualStrategyNumber:
+
/*
- * Find first item > scankey, then back up one to arrive at last
- * item <= scankey. (Note: this positioning strategy is only used
- * for a backward scan, so that is always the correct starting
- * position.)
+ * Find first item > scankey, then back up one to arrive at
+ * last item <= scankey. (Note: this positioning strategy is
+ * only used for a backward scan, so that is always the
+ * correct starting position.)
*/
nextkey = true;
goback = true;
break;
case BTEqualStrategyNumber:
+
/*
* If a backward scan was specified, need to start with last
* equal item not first one.
if (ScanDirectionIsBackward(dir))
{
/*
- * This is the same as the <= strategy. We will check
- * at the end whether the found item is actually =.
+ * This is the same as the <= strategy. We will check at
+ * the end whether the found item is actually =.
*/
nextkey = true;
goback = true;
else
{
/*
- * This is the same as the >= strategy. We will check
- * at the end whether the found item is actually =.
+ * This is the same as the >= strategy. We will check at
+ * the end whether the found item is actually =.
*/
nextkey = false;
goback = false;
break;
case BTGreaterEqualStrategyNumber:
+
/*
- * Find first item >= scankey. (This is only used for
- * forward scans.)
+ * Find first item >= scankey. (This is only used for forward
+ * scans.)
*/
nextkey = false;
goback = false;
break;
case BTGreaterStrategyNumber:
+
/*
- * Find first item > scankey. (This is only used for
- * forward scans.)
+ * Find first item > scankey. (This is only used for forward
+ * scans.)
*/
nextkey = true;
goback = false;
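/*
 * Condensed view of the switch above (toy enum, not the real strategy
 * numbers): every initial-positioning strategy boils down to the two flags
 * discussed in the comment.  'backward' matters only for the "=" case,
 * mirroring the ScanDirectionIsBackward test.
 */
#include <stdbool.h>

typedef enum ToyStrategy
{
	TOY_LESS,
	TOY_LESS_EQUAL,
	TOY_EQUAL,
	TOY_GREATER_EQUAL,
	TOY_GREATER
} ToyStrategy;

static void
toy_positioning_flags(ToyStrategy strat, bool backward,
					  bool *nextkey, bool *goback)
{
	switch (strat)
	{
		case TOY_LESS:			/* locate first item >= key, then step back */
			*nextkey = false;
			*goback = true;
			break;
		case TOY_LESS_EQUAL:	/* locate first item > key, then step back */
			*nextkey = true;
			*goback = true;
			break;
		case TOY_EQUAL:			/* backward: behaves like <= ; forward: like >= */
			*nextkey = backward;
			*goback = backward;
			break;
		case TOY_GREATER_EQUAL:	/* locate first item >= key, stay there */
			*nextkey = false;
			*goback = false;
			break;
		case TOY_GREATER:		/* locate first item > key, stay there */
			*nextkey = true;
			*goback = false;
			break;
	}
}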
pfree(scankeys);
/*
- * If nextkey = false, we are positioned at the first item >= scan key,
- * or possibly at the end of a page on which all the existing items are
- * less than the scan key and we know that everything on later pages
- * is greater than or equal to scan key.
+ * If nextkey = false, we are positioned at the first item >= scan
+ * key, or possibly at the end of a page on which all the existing
+ * items are less than the scan key and we know that everything on
+ * later pages is greater than or equal to scan key.
*
- * If nextkey = true, we are positioned at the first item > scan key,
- * or possibly at the end of a page on which all the existing items are
+ * If nextkey = true, we are positioned at the first item > scan key, or
+ * possibly at the end of a page on which all the existing items are
* less than or equal to the scan key and we know that everything on
* later pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior
- * one, or in the end-of-page case it's the first item on the next page
- * or the last item on this page. We apply _bt_step if needed to get to
- * the right place.
+ * one, or in the end-of-page case it's the first item on the next
+ * page or the last item on this page. We apply _bt_step if needed to
+ * get to the right place.
*
- * If _bt_step fails (meaning we fell off the end of the index in
- * one direction or the other), then there are no matches so we just
+ * If _bt_step fails (meaning we fell off the end of the index in one
+ * direction or the other), then there are no matches so we just
* return false.
*/
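/*
 * Toy version of the final positioning step on a flat sorted array: 'pos'
 * is where the search landed, i.e. the first item >= (or >) the scan key.
 * With goback the desired tuple is the one just before it.  On a real
 * index the end-of-page cases are handled by _bt_step, which crosses to a
 * neighbouring page; on this single array, falling off either end simply
 * means there are no matches.
 */
#include <stdbool.h>

static bool
toy_start_position(int pos, int nitems, bool goback, int *start)
{
	if (goback)
		pos--;					/* desired tuple is the prior one */
	if (pos < 0 || pos >= nitems)
		return false;			/* fell off the end: no matching tuples */
	*start = pos;
	return true;
}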
if (goback)
itup = &(btitem->bti_itup);
/*
- * Okay, we are on the first or last tuple. Does it pass all the quals?
+ * Okay, we are on the first or last tuple. Does it pass all the
+ * quals?
*/
if (_bt_checkkeys(scan, itup, dir, &continuescan))
{
*
* Since the index will never be used unless it is completely built,
* from a crash-recovery point of view there is no need to WAL-log the
- * steps of the build. After completing the index build, we can just sync
+ * steps of the build. After completing the index build, we can just sync
* the whole file to disk using smgrimmedsync() before exiting this module.
* This can be seen to be sufficient for crash recovery by considering that
* it's effectively equivalent to what would happen if a CHECKPOINT occurred
- * just after the index build. However, it is clearly not sufficient if the
+ * just after the index build. However, it is clearly not sufficient if the
* DBA is using the WAL log for PITR or replication purposes, since another
* machine would not be able to reconstruct the index from WAL. Therefore,
* we log the completed index pages to WAL if and only if WAL archiving is
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.87 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.88 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct BTPageState
{
Page btps_page; /* workspace for page building */
- BlockNumber btps_blkno; /* block # to write this page at */
+ BlockNumber btps_blkno; /* block # to write this page at */
BTItem btps_minkey; /* copy of minimum key (first item) on
* page */
OffsetNumber btps_lastoff; /* last item offset loaded */
typedef struct BTWriteState
{
Relation index;
- bool btws_use_wal; /* dump pages to WAL? */
- BlockNumber btws_pages_alloced; /* # pages allocated */
- BlockNumber btws_pages_written; /* # pages written out */
- Page btws_zeropage; /* workspace for filling zeroes */
+ bool btws_use_wal; /* dump pages to WAL? */
+ BlockNumber btws_pages_alloced; /* # pages allocated */
+ BlockNumber btws_pages_written; /* # pages written out */
+ Page btws_zeropage; /* workspace for filling zeroes */
} BTWriteState;
static void _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti);
static void _bt_uppershutdown(BTWriteState *wstate, BTPageState *state);
static void _bt_load(BTWriteState *wstate,
- BTSpool *btspool, BTSpool *btspool2);
+ BTSpool *btspool, BTSpool *btspool2);
/*
btspool->isunique = isunique;
/*
- * We size the sort area as maintenance_work_mem rather than work_mem to
- * speed index creation. This should be OK since a single backend can't
- * run multiple index creations in parallel. Note that creation of a
- * unique index actually requires two BTSpool objects. We expect that the
- * second one (for dead tuples) won't get very full, so we give it only
- * work_mem.
+ * We size the sort area as maintenance_work_mem rather than work_mem
+ * to speed index creation. This should be OK since a single backend
+ * can't run multiple index creations in parallel. Note that creation
+ * of a unique index actually requires two BTSpool objects. We expect
+ * that the second one (for dead tuples) won't get very full, so we
+ * give it only work_mem.
*/
btKbytes = isdead ? work_mem : maintenance_work_mem;
btspool->sortstate = tuplesort_begin_index(index, isunique,
void
_bt_leafbuild(BTSpool *btspool, BTSpool *btspool2)
{
- BTWriteState wstate;
+ BTWriteState wstate;
#ifdef BTREE_BUILD_STATS
if (log_btree_build_stats)
tuplesort_performsort(btspool2->sortstate);
wstate.index = btspool->index;
+
/*
* We need to log index creation in WAL iff WAL archiving is enabled
* AND it's not a temp index.
/* reserve the metapage */
wstate.btws_pages_alloced = BTREE_METAPAGE + 1;
wstate.btws_pages_written = 0;
- wstate.btws_zeropage = NULL; /* until needed */
+ wstate.btws_zeropage = NULL; /* until needed */
_bt_load(&wstate, btspool, btspool2);
}
static Page
_bt_blnewpage(uint32 level)
{
- Page page;
+ Page page;
BTPageOpaque opaque;
page = (Page) palloc(BLCKSZ);
* If we have to write pages nonsequentially, fill in the space with
* zeroes until we come back and overwrite. This is not logically
* necessary on standard Unix filesystems (unwritten space will read
- * as zeroes anyway), but it should help to avoid fragmentation.
- * The dummy pages aren't WAL-logged though.
+ * as zeroes anyway), but it should help to avoid fragmentation. The
+ * dummy pages aren't WAL-logged though.
*/
while (blkno > wstate->btws_pages_written)
{
}
/*
- * Now write the page. We say isTemp = true even if it's not a
- * temp index, because there's no need for smgr to schedule an fsync
- * for this write; we'll do it ourselves before ending the build.
+ * Now write the page. We say isTemp = true even if it's not a temp
+ * index, because there's no need for smgr to schedule an fsync for
+ * this write; we'll do it ourselves before ending the build.
*/
smgrwrite(wstate->index->rd_smgr, blkno, (char *) page, true);
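/*
 * Sketch of the same idea with plain POSIX pwrite() on an ordinary file
 * (toy code; the real path goes through smgrwrite): any gap between the
 * last block written and the requested block is filled with explicit zero
 * pages so the file grows sequentially, then the real page is written at
 * its slot.
 */
#include <unistd.h>

#define TOY_BLCKSZ 8192

static int
toy_write_page(int fd, unsigned blkno, const char *page, unsigned *pages_written)
{
	static const char zeropage[TOY_BLCKSZ];		/* all zeroes */

	while (*pages_written < blkno)
	{
		if (pwrite(fd, zeropage, TOY_BLCKSZ,
				   (off_t) *pages_written * TOY_BLCKSZ) != TOY_BLCKSZ)
			return -1;
		(*pages_written)++;
	}
	if (pwrite(fd, page, TOY_BLCKSZ, (off_t) blkno * TOY_BLCKSZ) != TOY_BLCKSZ)
		return -1;
	if (blkno == *pages_written)
		(*pages_written)++;		/* sequential write extends the file */
	return 0;
}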
_bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
{
Page npage;
- BlockNumber nblkno;
+ BlockNumber nblkno;
OffsetNumber last_off;
Size pgspc;
Size btisz;
* already. Finish off the page and write it out.
*/
Page opage = npage;
- BlockNumber oblkno = nblkno;
+ BlockNumber oblkno = nblkno;
ItemId ii;
ItemId hii;
BTItem obti;
((PageHeader) opage)->pd_lower -= sizeof(ItemIdData);
/*
- * Link the old page into its parent, using its minimum key. If
- * we don't have a parent, we have to create one; this adds a new
+ * Link the old page into its parent, using its minimum key. If we
+ * don't have a parent, we have to create one; this adds a new
* btree level.
*/
if (state->btps_next == NULL)
}
/*
- * Write out the old page. We never need to touch it again,
- * so we can free the opage workspace too.
+ * Write out the old page. We never need to touch it again, so we
+ * can free the opage workspace too.
*/
_bt_blwritepage(wstate, opage, oblkno);
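/*
 * Toy model of the "link the old page into its parent via its minimum key"
 * step: keys arrive in sorted order; whenever a leaf "page" fills up, its
 * first (minimum) key is recorded as the downlink entry for the parent
 * level.  Building further levels recursively, and the actual page writes,
 * are left out of this sketch.
 */
#define TOY_KEYS_PER_PAGE 4

static int
toy_collect_downlinks(const int *keys, int nkeys, int *parent_keys)
{
	int		nparent = 0;
	int		n_on_page = 0;
	int		i;

	for (i = 0; i < nkeys; i++)
	{
		if (n_on_page == 0)
			parent_keys[nparent++] = keys[i];	/* minimum key of a new page */
		if (++n_on_page == TOY_KEYS_PER_PAGE)
			n_on_page = 0;		/* page "written out"; start the next one */
	}
	return nparent;				/* number of downlinks the parent will hold */
}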
_bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
{
BTPageState *s;
- BlockNumber rootblkno = P_NONE;
+ BlockNumber rootblkno = P_NONE;
uint32 rootlevel = 0;
Page metapage;
/*
* As the last step in the process, construct the metapage and make it
- * point to the new root (unless we had no data at all, in which case it's
- * set to point to "P_NONE"). This changes the index to the "valid"
- * state by filling in a valid magic number in the metapage.
+ * point to the new root (unless we had no data at all, in which case
+ * it's set to point to "P_NONE"). This changes the index to the
+ * "valid" state by filling in a valid magic number in the metapage.
*/
metapage = (Page) palloc(BLCKSZ);
_bt_initmetapage(metapage, rootblkno, rootlevel);
compare = DatumGetInt32(FunctionCall2(&entry->sk_func,
attrDatum1,
- attrDatum2));
+ attrDatum2));
if (compare > 0)
{
load1 = false;
if (should_free)
pfree((void *) bti);
bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
- true, &should_free);
+ true, &should_free);
}
else
{
if (should_free2)
pfree((void *) bti2);
bti2 = (BTItem) tuplesort_getindextuple(btspool2->sortstate,
- true, &should_free2);
+ true, &should_free2);
}
}
_bt_freeskey(indexScanKey);
{
/* merge is unnecessary */
while ((bti = (BTItem) tuplesort_getindextuple(btspool->sortstate,
- true, &should_free)) != NULL)
+ true, &should_free)) != NULL)
{
/* When we see first tuple, create first index page */
if (state == NULL)
/*
* If the index isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp index we don't care
+ * safe to commit the transaction. (For a temp index we don't care
* since the index will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the build.
- * It's less obvious that we have to do it even if we did WAL-log the
- * index pages. The reason is that since we're building outside
- * shared buffers, a CHECKPOINT occurring during the build has no way
- * to flush the previously written data to disk (indeed it won't know
- * the index even exists). A crash later on would replay WAL from the
+ * It's obvious that we must do this when not WAL-logging the build. It's
+ * less obvious that we have to do it even if we did WAL-log the index
+ * pages. The reason is that since we're building outside shared
+ * buffers, a CHECKPOINT occurring during the build has no way to
+ * flush the previously written data to disk (indeed it won't know the
+ * index even exists). A crash later on would replay WAL from the
* checkpoint, therefore it wouldn't replay our earlier WAL entries.
- * If we do not fsync those pages here, they might still not be on disk
- * when the crash occurs.
+ * If we do not fsync those pages here, they might still not be on
+ * disk when the crash occurs.
*/
if (!wstate->index->rd_istemp)
smgrimmedsync(wstate->index->rd_smgr);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.59 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.60 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool null;
/*
- * We can use the cached (default) support procs since no cross-type
- * comparison can be needed.
+ * We can use the cached (default) support procs since no
+ * cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
arg = index_getattr(itup, i + 1, itupdesc, &null);
/*
* _bt_mkscankey_nodata
* Build a scan key that contains comparator routines appropriate to
- * the key datatypes, but no comparison data. The comparison data
+ * the key datatypes, but no comparison data. The comparison data
* ultimately used must match the key datatypes.
*
* The result cannot be used with _bt_compare(). Currently this
FmgrInfo *procinfo;
/*
- * We can use the cached (default) support procs since no cross-type
- * comparison can be needed.
+ * We can use the cached (default) support procs since no
+ * cross-type comparison can be needed.
*/
procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
ScanKeyEntryInitializeWithInfo(&skey[i],
* _bt_preprocess_keys() -- Preprocess scan keys
*
* The caller-supplied keys (in scan->keyData[]) are copied to
- * so->keyData[] with possible transformation. scan->numberOfKeys is
+ * so->keyData[] with possible transformation. scan->numberOfKeys is
* the number of input keys, so->numberOfKeys gets the number of output
* keys (possibly less, never greater).
*
* The primary purpose of this routine is to discover how many scan keys
- * must be satisfied to continue the scan. It also attempts to eliminate
+ * must be satisfied to continue the scan. It also attempts to eliminate
* redundant keys and detect contradictory keys. At present, redundant and
* contradictory keys can only be detected for same-data-type comparisons,
* but that's the usual case so it seems worth doing.
* or one or two boundary-condition keys for each attr.) However, we can
* only detect redundant keys when the right-hand datatypes are all equal
* to the index datatype, because we do not know suitable operators for
- * comparing right-hand values of two different datatypes. (In theory
+ * comparing right-hand values of two different datatypes. (In theory
* we could handle comparison of a RHS of the index datatype with a RHS of
* another type, but that seems too much pain for too little gain.) So,
* keys whose operator has a nondefault subtype (ie, its RHS is not of the
*
* xform[i] points to the currently best scan key of strategy type i+1,
* if any is found with a default operator subtype; it is NULL if we
- * haven't yet found such a key for this attr. Scan keys of nondefault
- * subtypes are transferred to the output with no processing except for
- * noting if they are of "=" type.
+ * haven't yet found such a key for this attr. Scan keys of
+ * nondefault subtypes are transferred to the output with no
+ * processing except for noting if they are of "=" type.
*/
attno = 1;
memset(xform, 0, sizeof(xform));
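/*
 * Tiny illustration (toy types, not ScanKeys) of the redundancy check the
 * xform[] machinery performs for one attribute: given both an "x < a" and
 * an "x <= b" bound with comparable right-hand sides, only the tighter one
 * needs to be kept -- "x < a" when a <= b, otherwise "x <= b".
 */
typedef struct ToyBound
{
	int		is_strict;		/* 1 = "<", 0 = "<=" */
	int		arg;
} ToyBound;

static ToyBound
toy_tighter_upper_bound(ToyBound lt, ToyBound le)
{
	return (lt.arg <= le.arg) ? lt : le;
}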
/*
* If no "=" for this key, we're done with required keys
*/
- if (! hasOtherTypeEqual)
+ if (!hasOtherTypeEqual)
allEqualSoFar = false;
}
if (xform[BTLessStrategyNumber - 1]
&& xform[BTLessEqualStrategyNumber - 1])
{
- ScanKey lt = xform[BTLessStrategyNumber - 1];
- ScanKey le = xform[BTLessEqualStrategyNumber - 1];
+ ScanKey lt = xform[BTLessStrategyNumber - 1];
+ ScanKey le = xform[BTLessEqualStrategyNumber - 1];
test = FunctionCall2(&le->sk_func,
lt->sk_argument,
if (xform[BTGreaterStrategyNumber - 1]
&& xform[BTGreaterEqualStrategyNumber - 1])
{
- ScanKey gt = xform[BTGreaterStrategyNumber - 1];
- ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];
+ ScanKey gt = xform[BTGreaterStrategyNumber - 1];
+ ScanKey ge = xform[BTGreaterEqualStrategyNumber - 1];
test = FunctionCall2(&ge->sk_func,
gt->sk_argument,
{
/*
* Tuple fails this qual. If it's a required qual, then we
- * may be able to conclude no further tuples will pass, either.
- * We have to look at the scan direction and the qual type.
+ * may be able to conclude no further tuples will pass,
+ * either. We have to look at the scan direction and the qual
+ * type.
*
* Note: the only case in which we would keep going after failing
- * a required qual is if there are partially-redundant quals that
- * _bt_preprocess_keys() was unable to eliminate. For example,
- * given "x > 4 AND x > 10" where both are cross-type comparisons
- * and so not removable, we might start the scan at the x = 4
- * boundary point. The "x > 10" condition will fail until we
- * pass x = 10, but we must not stop the scan on its account.
+ * a required qual is if there are partially-redundant quals
+ * that _bt_preprocess_keys() was unable to eliminate. For
+ * example, given "x > 4 AND x > 10" where both are cross-type
+ * comparisons and so not removable, we might start the scan
+ * at the x = 4 boundary point. The "x > 10" condition will
+ * fail until we pass x = 10, but we must not stop the scan on
+ * its account.
*
- * Note: because we stop the scan as soon as any required equality
- * qual fails, it is critical that equality quals be used for the
- * initial positioning in _bt_first() when they are available.
- * See comments in _bt_first().
+ * Note: because we stop the scan as soon as any required
+ * equality qual fails, it is critical that equality quals be
+ * used for the initial positioning in _bt_first() when they
+ * are available. See comments in _bt_first().
*/
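/*
 * Toy _bt_checkkeys for a forward scan over a single int column,
 * illustrating the distinction above (toy code, not the real routine).
 * required_hi models a required "x <= hi" qual: once it fails on an
 * ordered forward scan, no later tuple can pass either, so *continuescan
 * is cleared.  extra_lo models a non-removable "x > lo" qual like the
 * "x > 10" in the example: its failure only rejects the current tuple.
 */
#include <stdbool.h>

static bool
toy_checkkeys(int x, int required_hi, int extra_lo, bool *continuescan)
{
	*continuescan = true;
	if (x > required_hi)
	{
		*continuescan = false;	/* required qual failed: stop the scan */
		return false;
	}
	if (x <= extra_lo)
		return false;			/* tuple fails, but keep scanning */
	return true;
}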
if (ikey < so->numberOfRequiredKeys)
{
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.17 2004/08/29 04:12:21 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/nbtree/nbtxlog.c,v 1.18 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
out_target(char *buf, xl_btreetid *target)
{
sprintf(buf + strlen(buf), "rel %u/%u/%u; tid %u/%u",
- target->node.spcNode, target->node.dbNode, target->node.relNode,
+ target->node.spcNode, target->node.dbNode, target->node.relNode,
ItemPointerGetBlockNumber(&(target->tid)),
ItemPointerGetOffsetNumber(&(target->tid)));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.54 2004/08/29 04:12:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/rtree/rtscan.c,v 1.55 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid int_oper;
RegProcedure int_proc;
- opclass = s->indexRelation->rd_index->indclass[attno-1];
+ opclass = s->indexRelation->rd_index->indclass[attno - 1];
int_strategy = RTMapToInternalOperator(s->keyData[i].sk_strategy);
int_oper = get_opclass_member(opclass,
s->keyData[i].sk_subtype,
void
ReleaseResources_rtree(void)
{
- RTScanList l;
- RTScanList prev;
- RTScanList next;
+ RTScanList l;
+ RTScanList prev;
+ RTScanList next;
/*
- * Note: this should be a no-op during normal query shutdown.
- * However, in an abort situation ExecutorEnd is not called and so
- * there may be open index scans to clean up.
+ * Note: this should be a no-op during normal query shutdown. However,
+ * in an abort situation ExecutorEnd is not called and so there may be
+ * open index scans to clean up.
*/
prev = NULL;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.24 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/clog.c,v 1.25 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Link to shared-memory data structures for CLOG control
*/
static SlruCtlData ClogCtlData;
+
#define ClogCtl (&ClogCtlData)
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.20 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.21 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* segment and page numbers in SimpleLruTruncate (see PagePrecedes()).
*
* Note: this file currently assumes that segment file names will be four
- * hex digits. This sets a lower bound on the segment size (64K transactions
+ * hex digits. This sets a lower bound on the segment size (64K transactions
* for 32-bit TransactionIds).
*/
#define SLRU_PAGES_PER_SEGMENT 32
*/
typedef struct SlruFlushData
{
- int num_files; /* # files actually open */
- int fd[NUM_SLRU_BUFFERS]; /* their FD's */
- int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */
+ int num_files; /* # files actually open */
+ int fd[NUM_SLRU_BUFFERS]; /* their FD's */
+ int segno[NUM_SLRU_BUFFERS]; /* their log seg#s */
} SlruFlushData;
/*
static bool SlruPhysicalReadPage(SlruCtl ctl, int pageno, int slotno);
static bool SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno,
- SlruFlush fdata);
+ SlruFlush fdata);
static void SlruReportIOError(SlruCtl ctl, int pageno, TransactionId xid);
static int SlruSelectLRUPage(SlruCtl ctl, int pageno);
/* If we failed, and we're in a flush, better close the files */
if (!ok && fdata)
{
- int i;
+ int i;
for (i = 0; i < fdata->num_files; i++)
close(fdata->fd[i]);
*/
if (fdata)
{
- int i;
+ int i;
for (i = 0; i < fdata->num_files; i++)
{
{
/*
* If the file doesn't already exist, we should create it. It is
- * possible for this to need to happen when writing a page that's not
- * first in its segment; we assume the OS can cope with that.
- * (Note: it might seem that it'd be okay to create files only when
- * SimpleLruZeroPage is called for the first page of a segment.
- * However, if after a crash and restart the REDO logic elects to
- * replay the log from a checkpoint before the latest one, then it's
- * possible that we will get commands to set transaction status of
- * transactions that have already been truncated from the commit log.
- * Easiest way to deal with that is to accept references to
- * nonexistent files here and in SlruPhysicalReadPage.)
+ * possible for this to need to happen when writing a page that's
+ * not first in its segment; we assume the OS can cope with that.
+ * (Note: it might seem that it'd be okay to create files only
+ * when SimpleLruZeroPage is called for the first page of a
+ * segment. However, if after a crash and restart the REDO logic
+ * elects to replay the log from a checkpoint before the latest
+ * one, then it's possible that we will get commands to set
+ * transaction status of transactions that have already been
+ * truncated from the commit log. Easiest way to deal with that is
+ * to accept references to nonexistent files here and in
+ * SlruPhysicalReadPage.)
*/
SlruFileName(ctl, path, segno);
fd = BasicOpenFile(path, O_RDWR | PG_BINARY, S_IRUSR | S_IWUSR);
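/*
 * Sketch of the "accept references to not-yet-existing files" policy with
 * plain POSIX calls (toy code; the real path uses BasicOpenFile): try an
 * ordinary open first, and only when that fails with ENOENT retry with
 * O_CREAT, so that redo of status updates for already-truncated segments
 * can quietly recreate them.
 */
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>

static int
toy_open_segment(const char *path)
{
	int		fd = open(path, O_RDWR);

	if (fd < 0 && errno == ENOENT)
		fd = open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	return fd;					/* still negative on genuine failure */
}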
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("could not seek in file \"%s\" to offset %u: %m",
- path, offset)));
+ errdetail("could not seek in file \"%s\" to offset %u: %m",
+ path, offset)));
break;
case SLRU_READ_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("could not read from file \"%s\" at offset %u: %m",
- path, offset)));
+ errdetail("could not read from file \"%s\" at offset %u: %m",
+ path, offset)));
break;
case SLRU_WRITE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("could not write to file \"%s\" at offset %u: %m",
- path, offset)));
+ errdetail("could not write to file \"%s\" at offset %u: %m",
+ path, offset)));
break;
case SLRU_FSYNC_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("could not fsync file \"%s\": %m",
- path)));
+ errdetail("could not fsync file \"%s\": %m",
+ path)));
break;
case SLRU_CLOSE_FAILED:
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not access status of transaction %u", xid),
- errdetail("could not close file \"%s\": %m",
- path)));
+ errdetail("could not close file \"%s\": %m",
+ path)));
break;
default:
/* can't get here, we trust */
/*
* Scan shared memory and remove any pages preceding the cutoff page,
* to ensure we won't rewrite them later. (Since this is normally
- * called in or just after a checkpoint, any dirty pages should
- * have been flushed already ... we're just being extra careful here.)
+ * called in or just after a checkpoint, any dirty pages should have
+ * been flushed already ... we're just being extra careful here.)
*/
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
errno = 0;
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
*
* The pg_subtrans manager is a pg_clog-like manager that stores the parent
* transaction Id for each transaction. It is a fundamental part of the
- * nested transactions implementation. A main transaction has a parent
+ * nested transactions implementation. A main transaction has a parent
* of InvalidTransactionId, and each subtransaction has its immediate parent.
* The tree can easily be walked from child to parent, but not in the
* opposite direction.
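/*
 * Toy version of the child-to-parent walk the comment describes (a plain
 * array in place of the pg_subtrans SLRU): parent_of[xid] holds the
 * immediate parent, with 0 standing in for InvalidTransactionId on a
 * top-level transaction.
 */
#define TOY_INVALID_XID 0

static unsigned
toy_topmost_transaction(const unsigned *parent_of, unsigned xid)
{
	while (parent_of[xid] != TOY_INVALID_XID)
		xid = parent_of[xid];
	return xid;
}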
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.4 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.5 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Link to shared-memory data structures for SUBTRANS control
*/
static SlruCtlData SubTransCtlData;
+
#define SubTransCtl (&SubTransCtlData)
int entryno = TransactionIdToEntry(xid);
int slotno;
TransactionId *ptr;
- TransactionId parent;
+ TransactionId parent;
/* Can't ask about stuff that might not be around anymore */
Assert(TransactionIdFollowsOrEquals(xid, RecentXmin));
SubTransGetTopmostTransaction(TransactionId xid)
{
TransactionId parentXid = xid,
- previousXid = xid;
+ previousXid = xid;
/* Can't ask about stuff that might not be around anymore */
Assert(TransactionIdFollowsOrEquals(xid, RecentXmin));
* must have been called already.)
*
* Note: it's not really necessary to create the initial segment now,
- * since slru.c would create it on first write anyway. But we may as well
+ * since slru.c would create it on first write anyway. But we may as well
* do it to be sure the directory is set up correctly.
*/
void
int startPage;
/*
- * Since we don't expect pg_subtrans to be valid across crashes,
- * we initialize the currently-active page to zeroes during startup.
+ * Since we don't expect pg_subtrans to be valid across crashes, we
+ * initialize the currently-active page to zeroes during startup.
* Whenever we advance into a new page, ExtendSUBTRANS will likewise
- * zero the new page without regard to whatever was previously on disk.
+ * zero the new page without regard to whatever was previously on
+ * disk.
*/
LWLockAcquire(SubtransControlLock, LW_EXCLUSIVE);
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view.
- * We do it merely as a debugging aid.
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
/*
* Flush dirty SUBTRANS pages to disk
*
- * This is not actually necessary from a correctness point of view.
- * We do it merely to improve the odds that writing of dirty pages is done
+ * This is not actually necessary from a correctness point of view. We do
+ * it merely to improve the odds that writing of dirty pages is done
* by the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.60 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.61 2004/08/29 05:06:40 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
static void
TransactionLogMultiUpdate(int nxids, TransactionId *xids, XidStatus status)
{
- int i;
+ int i;
Assert(nxids != 0);
return true;
/*
- * If it's marked subcommitted, we have to check the parent recursively.
- * However, if it's older than RecentXmin, we can't look at pg_subtrans;
- * instead assume that the parent crashed without cleaning up its children.
+ * If it's marked subcommitted, we have to check the parent
+ * recursively. However, if it's older than RecentXmin, we can't look
+ * at pg_subtrans; instead assume that the parent crashed without
+ * cleaning up its children.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{
return TransactionIdDidCommit(parentXid);
}
- /*
+ /*
* It's not committed.
*/
return false;
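/*
 * Toy illustration of the recursion described above (arrays in place of
 * clog and pg_subtrans; the RecentXmin "assume the parent crashed" escape
 * hatch is omitted): SUB_COMMITTED means the xid's fate is whatever its
 * parent's fate turns out to be.
 */
typedef enum ToyXidStatus
{
	TOY_XID_IN_PROGRESS,
	TOY_XID_COMMITTED,
	TOY_XID_ABORTED,
	TOY_XID_SUB_COMMITTED
} ToyXidStatus;

static int
toy_did_commit(const ToyXidStatus *status, const unsigned *parent_of, unsigned xid)
{
	if (status[xid] == TOY_XID_COMMITTED)
		return 1;
	if (status[xid] == TOY_XID_SUB_COMMITTED)
		return toy_did_commit(status, parent_of, parent_of[xid]);
	return 0;					/* aborted, crashed, or still in progress */
}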
return true;
/*
- * If it's marked subcommitted, we have to check the parent recursively.
- * However, if it's older than RecentXmin, we can't look at pg_subtrans;
- * instead assume that the parent crashed without cleaning up its children.
+ * If it's marked subcommitted, we have to check the parent
+ * recursively. However, if it's older than RecentXmin, we can't look
+ * at pg_subtrans; instead assume that the parent crashed without
+ * cleaning up its children.
*/
if (xidstatus == TRANSACTION_STATUS_SUB_COMMITTED)
{
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.58 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.59 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
xid = ShmemVariableCache->nextXid;
/*
- * If we are allocating the first XID of a new page of the commit
- * log, zero out that commit-log page before returning. We must do
- * this while holding XidGenLock, else another xact could acquire and
+ * If we are allocating the first XID of a new page of the commit log,
+ * zero out that commit-log page before returning. We must do this
+ * while holding XidGenLock, else another xact could acquire and
* commit a later XID before we zero the page. Fortunately, a page of
* the commit log holds 32K or more transactions, so we don't have to
* do this very often.
/*
* Now advance the nextXid counter. This must not happen until after
- * we have successfully completed ExtendCLOG() --- if that routine fails,
- * we want the next incoming transaction to try it again. We cannot
- * assign more XIDs until there is CLOG space for them.
+ * we have successfully completed ExtendCLOG() --- if that routine
+ * fails, we want the next incoming transaction to try it again. We
+ * cannot assign more XIDs until there is CLOG space for them.
*/
TransactionIdAdvance(ShmemVariableCache->nextXid);
/*
- * We must store the new XID into the shared PGPROC array before releasing
- * XidGenLock. This ensures that when GetSnapshotData calls
+ * We must store the new XID into the shared PGPROC array before
+ * releasing XidGenLock. This ensures that when GetSnapshotData calls
* ReadNewTransactionId, all active XIDs before the returned value of
- * nextXid are already present in PGPROC. Else we have a race condition.
+ * nextXid are already present in PGPROC. Else we have a race
+ * condition.
*
* XXX by storing xid into MyProc without acquiring SInvalLock, we are
* relying on fetch/store of an xid to be atomic, else other backends
*
* A solution to the atomic-store problem would be to give each PGPROC
* its own spinlock used only for fetching/storing that PGPROC's xid
- * and related fields. (SInvalLock would then mean primarily that
+ * and related fields. (SInvalLock would then mean primarily that
* PGPROCs couldn't be added/removed while holding the lock.)
*
* If there's no room to fit a subtransaction XID into PGPROC, set the
* cache-overflowed flag instead. This forces readers to look in
- * pg_subtrans to map subtransaction XIDs up to top-level XIDs.
- * There is a race-condition window, in that the new XID will not
- * appear as running until its parent link has been placed into
- * pg_subtrans. However, that will happen before anyone could possibly
- * have a reason to inquire about the status of the XID, so it seems
- * OK. (Snapshots taken during this window *will* include the parent
- * XID, so they will deliver the correct answer later on when someone
- * does have a reason to inquire.)
+ * pg_subtrans to map subtransaction XIDs up to top-level XIDs. There
+ * is a race-condition window, in that the new XID will not appear as
+ * running until its parent link has been placed into pg_subtrans.
+ * However, that will happen before anyone could possibly have a
+ * reason to inquire about the status of the XID, so it seems OK.
+ * (Snapshots taken during this window *will* include the parent XID,
+ * so they will deliver the correct answer later on when someone does
+ * have a reason to inquire.)
*/
if (MyProc != NULL)
{
MyProc->subxids.nxids++;
}
else
- {
MyProc->subxids.overflowed = true;
- }
}
}
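/*
 * Sketch of the fixed-size subxid cache plus overflow flag described above
 * (toy struct, not the real PGPROC layout): when the cache is full we only
 * set overflowed, which tells snapshot readers to fall back to pg_subtrans
 * for mapping subtransaction XIDs to their top-level XID.
 */
#include <stdbool.h>

#define TOY_NSUBXIDS 64

typedef struct ToySubxidCache
{
	int			nxids;
	bool		overflowed;
	unsigned	xids[TOY_NSUBXIDS];
} ToySubxidCache;

static void
toy_remember_subxid(ToySubxidCache *cache, unsigned xid)
{
	if (cache->nxids < TOY_NSUBXIDS)
		cache->xids[cache->nxids++] = xid;
	else
		cache->overflowed = true;
}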
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.182 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.183 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct TransactionStateData
{
- TransactionId transactionIdData; /* my XID */
- char *name; /* savepoint name, if any */
- int savepointLevel; /* savepoint level */
- CommandId commandId; /* current CID */
- TransState state; /* low-level state */
- TBlockState blockState; /* high-level state */
- int nestingLevel; /* nest depth */
- MemoryContext curTransactionContext; /* my xact-lifetime context */
- ResourceOwner curTransactionOwner; /* my query resources */
- List *childXids; /* subcommitted child XIDs */
- AclId currentUser; /* subxact start current_user */
- bool prevXactReadOnly; /* entry-time xact r/o state */
- struct TransactionStateData *parent; /* back link to parent */
+ TransactionId transactionIdData; /* my XID */
+ char *name; /* savepoint name, if any */
+ int savepointLevel; /* savepoint level */
+ CommandId commandId; /* current CID */
+ TransState state; /* low-level state */
+ TBlockState blockState; /* high-level state */
+ int nestingLevel; /* nest depth */
+ MemoryContext curTransactionContext; /* my xact-lifetime
+ * context */
+ ResourceOwner curTransactionOwner; /* my query resources */
+ List *childXids; /* subcommitted child XIDs */
+ AclId currentUser; /* subxact start current_user */
+ bool prevXactReadOnly; /* entry-time xact r/o state */
+ struct TransactionStateData *parent; /* back link to parent */
} TransactionStateData;
typedef TransactionStateData *TransactionState;
* This does not change as we enter and exit subtransactions, so we don't
* keep it inside the TransactionState stack.
*/
-static AbsoluteTime xactStartTime; /* integer part */
-static int xactStartTimeUsec; /* microsecond part */
+static AbsoluteTime xactStartTime; /* integer part */
+static int xactStartTimeUsec; /* microsecond part */
/*
{
TransactionState s = CurrentTransactionState;
- if (s->blockState == TBLOCK_ABORT ||
+ if (s->blockState == TBLOCK_ABORT ||
s->blockState == TBLOCK_SUBABORT)
return true;
}
/*
- * We will return true for the Xid of the current subtransaction,
- * any of its subcommitted children, any of its parents, or any of
- * their previously subcommitted children. However, a transaction
- * being aborted is no longer "current", even though it may still
- * have an entry on the state stack.
+ * We will return true for the Xid of the current subtransaction, any
+ * of its subcommitted children, any of its parents, or any of their
+ * previously subcommitted children. However, a transaction being
+ * aborted is no longer "current", even though it may still have an
+ * entry on the state stack.
*/
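/*
 * Toy walk over the transaction-state stack following the rule above
 * (simplified structs, not TransactionStateData): an xid counts as
 * "current" if it belongs to this subtransaction, to one of its
 * subcommitted children, or to any ancestor likewise -- skipping levels
 * that are already in the process of aborting.
 */
#include <stdbool.h>

typedef struct ToyXactState
{
	unsigned	xid;
	bool		aborting;		/* TRANS_ABORT: no longer "current" */
	int			nchildren;
	const unsigned *child_xids;	/* subcommitted child XIDs */
	const struct ToyXactState *parent;
} ToyXactState;

static bool
toy_xid_is_current(const ToyXactState *s, unsigned xid)
{
	for (; s != NULL; s = s->parent)
	{
		int		i;

		if (s->aborting)
			continue;
		if (s->xid == xid)
			return true;
		for (i = 0; i < s->nchildren; i++)
			if (s->child_xids[i] == xid)
				return true;
	}
	return false;
}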
for (s = CurrentTransactionState; s != NULL; s = s->parent)
{
- ListCell *cell;
+ ListCell *cell;
if (s->state == TRANS_ABORT)
continue;
Assert(CurTransactionContext != NULL);
/*
- * Create a CurTransactionContext, which will be used to hold data that
- * survives subtransaction commit but disappears on subtransaction abort.
- * We make it a child of the immediate parent's CurTransactionContext.
+ * Create a CurTransactionContext, which will be used to hold data
+ * that survives subtransaction commit but disappears on
+ * subtransaction abort. We make it a child of the immediate parent's
+ * CurTransactionContext.
*/
CurTransactionContext = AllocSetContextCreate(CurTransactionContext,
"CurTransactionContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
s->curTransactionContext = CurTransactionContext;
/* Make the CurTransactionContext active. */
Assert(s->parent != NULL);
/*
- * Create a resource owner for the subtransaction. We make it a
- * child of the immediate parent's resource owner.
+ * Create a resource owner for the subtransaction. We make it a child
+ * of the immediate parent's resource owner.
*/
s->curTransactionOwner =
ResourceOwnerCreate(s->parent->curTransactionOwner,
nchildren = xactGetCommittedChildren(&children);
/*
- * If we made neither any XLOG entries nor any temp-rel updates,
- * and have no files to be deleted, we can omit recording the transaction
+ * If we made neither any XLOG entries nor any temp-rel updates, and
+ * have no files to be deleted, we can omit recording the transaction
* commit at all. (This test includes the effects of subtransactions,
- * so the presence of committed subxacts need not alone force a write.)
+ * so the presence of committed subxacts need not alone force a
+ * write.)
*/
if (MyXactMadeXLogEntry || MyXactMadeTempRelUpdate || nrels > 0)
{
START_CRIT_SECTION();
/*
- * If our transaction made any transaction-controlled XLOG entries,
- * we need to lock out checkpoint start between writing our XLOG
- * record and updating pg_clog. Otherwise it is possible for the
- * checkpoint to set REDO after the XLOG record but fail to flush the
- * pg_clog update to disk, leading to loss of the transaction commit
- * if we crash a little later. Slightly klugy fix for problem
- * discovered 2004-08-10.
+ * If our transaction made any transaction-controlled XLOG
+ * entries, we need to lock out checkpoint start between writing
+ * our XLOG record and updating pg_clog. Otherwise it is possible
+ * for the checkpoint to set REDO after the XLOG record but fail
+ * to flush the pg_clog update to disk, leading to loss of the
+ * transaction commit if we crash a little later. Slightly klugy
+ * fix for problem discovered 2004-08-10.
*
* (If it made no transaction-controlled XLOG entries, its XID
- * appears nowhere in permanent storage, so no one else will ever care
- * if it committed; so it doesn't matter if we lose the commit flag.)
+ * appears nowhere in permanent storage, so no one else will ever
+ * care if it committed; so it doesn't matter if we lose the
+ * commit flag.)
*
* Note we only need a shared lock.
*/
RecordSubTransactionCommit(void)
{
/*
- * We do not log the subcommit in XLOG; it doesn't matter until
- * the top-level transaction commits.
+ * We do not log the subcommit in XLOG; it doesn't matter until the
+ * top-level transaction commits.
*
* We must mark the subtransaction subcommitted in clog if its XID
* appears either in permanent rels or in local temporary rels. We
- * test this by seeing if we made transaction-controlled entries
- * *OR* local-rel tuple updates. (The test here actually covers the
- * entire transaction tree so far, so it may mark subtransactions that
- * don't really need it, but it's probably not worth being tenser.
- * Note that if a prior subtransaction dirtied these variables, then
+ * test this by seeing if we made transaction-controlled entries *OR*
+ * local-rel tuple updates. (The test here actually covers the entire
+ * transaction tree so far, so it may mark subtransactions that don't
+ * really need it, but it's probably not worth being tenser. Note that
+ * if a prior subtransaction dirtied these variables, then
* RecordTransactionCommit will have to do the full pushup anyway...)
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate)
{
- TransactionId xid = GetCurrentTransactionId();
+ TransactionId xid = GetCurrentTransactionId();
/* XXX does this really need to be a critical section? */
START_CRIT_SECTION();
{
int nrels;
RelFileNode *rptr;
- int nchildren;
- TransactionId *children;
+ int nchildren;
+ TransactionId *children;
/* Get data needed for abort record */
nrels = smgrGetPendingDeletes(false, &rptr);
/*
* If we made neither any transaction-controlled XLOG entries nor any
- * temp-rel updates, and are not going to delete any files, we can omit
- * recording the transaction abort at all. No one will ever care that
- * it aborted. (These tests cover our whole transaction tree.)
+ * temp-rel updates, and are not going to delete any files, we can
+ * omit recording the transaction abort at all. No one will ever care
+ * that it aborted. (These tests cover our whole transaction tree.)
*/
if (MyLastRecPtr.xrecoff != 0 || MyXactMadeTempRelUpdate || nrels > 0)
{
- TransactionId xid = GetCurrentTransactionId();
+ TransactionId xid = GetCurrentTransactionId();
/*
* Catch the scenario where we aborted partway through
* We only need to log the abort in XLOG if the transaction made
* any transaction-controlled XLOG entries or will delete files.
* (If it made no transaction-controlled XLOG entries, its XID
- * appears nowhere in permanent storage, so no one else will ever care
- * if it committed.)
+ * appears nowhere in permanent storage, so no one else will ever
+ * care if it committed.)
*
* We do not flush XLOG to disk unless deleting files, since the
- * default assumption after a crash would be that we aborted, anyway.
- * For the same reason, we don't need to worry about interlocking
- * against checkpoint start.
+ * default assumption after a crash would be that we aborted,
+ * anyway. For the same reason, we don't need to worry about
+ * interlocking against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
{
int nrels;
RelFileNode *rptr;
- TransactionId xid = GetCurrentTransactionId();
- int nchildren;
- TransactionId *children;
+ TransactionId xid = GetCurrentTransactionId();
+ int nchildren;
+ TransactionId *children;
/* Get data needed for abort record */
nrels = smgrGetPendingDeletes(false, &rptr);
/*
* If we made neither any transaction-controlled XLOG entries nor any
- * temp-rel updates, and are not going to delete any files, we can omit
- * recording the transaction abort at all. No one will ever care that
- * it aborted. (These tests cover our whole transaction tree, and
- * therefore may mark subxacts that don't really need it, but it's
+ * temp-rel updates, and are not going to delete any files, we can
+ * omit recording the transaction abort at all. No one will ever care
+ * that it aborted. (These tests cover our whole transaction tree,
+ * and therefore may mark subxacts that don't really need it, but it's
* probably not worth being tenser.)
*
* In this case we needn't worry about marking subcommitted children as
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
XLogRecData rdata[3];
- int lastrdata = 0;
+ int lastrdata = 0;
xl_xact_abort xlrec;
- XLogRecPtr recptr;
+ XLogRecPtr recptr;
xlrec.xtime = time(NULL);
xlrec.nrels = nrels;
/*
* We can immediately remove failed XIDs from PGPROC's cache of
* running child XIDs. It's easiest to do it here while we have the
- * child XID array at hand, even though in the main-transaction
- * case the equivalent work happens just after return from
+ * child XID array at hand, even though in the main-transaction case
+ * the equivalent work happens just after return from
* RecordTransactionAbort.
*/
XidCacheRemoveRunningXids(xid, nchildren, children);
s->state = TRANS_START;
/*
- * Make sure we've freed any old snapshot, and reset xact state variables
+ * Make sure we've freed any old snapshot, and reset xact state
+ * variables
*/
FreeXactSnapshot();
XactIsoLevel = DefaultXactIsoLevel;
* want to release locks at the point where any backend waiting for us
* will see our transaction as being fully cleaned up.
*
- * Resources that can be associated with individual queries are
- * handled by the ResourceOwner mechanism. The other calls here
- * are for backend-wide state.
+ * Resources that can be associated with individual queries are handled
+ * by the ResourceOwner mechanism. The other calls here are for
+ * backend-wide state.
*/
smgrDoPendingDeletes(true);
* after relcache references are dropped (see comments for
* AtEOXact_RelationCache), but before locks are released (if anyone
* is waiting for lock on a relation we've modified, we want them to
- * know about the catalog change before they start using the relation).
+ * know about the catalog change before they start using the
+ * relation).
*/
AtEOXact_Inval(true);
/*
* Reset user id which might have been changed transiently. We cannot
- * use s->currentUser, but must get the session userid from miscinit.c.
+ * use s->currentUser, but must get the session userid from
+ * miscinit.c.
*
* (Note: it is not necessary to restore session authorization here
* because that can only be changed via GUC, and GUC will take care of
- * rolling it back if need be. However, an error within a SECURITY
+ * rolling it back if need be. However, an error within a SECURITY
* DEFINER function could send control here with the wrong current
* userid.)
*/
*/
DeferredTriggerAbortXact();
AtAbort_Portals();
- AtEOXact_LargeObject(false); /* 'false' means it's abort */
+ AtEOXact_LargeObject(false); /* 'false' means it's abort */
AtAbort_Notify();
AtEOXact_UpdatePasswordFile(false);
*/
AtCleanup_Portals(); /* now safe to release portal memory */
- CurrentResourceOwner = NULL; /* and resource owner */
+ CurrentResourceOwner = NULL; /* and resource owner */
ResourceOwnerDelete(TopTransactionResourceOwner);
s->curTransactionOwner = NULL;
CurTransactionResourceOwner = NULL;
break;
/*
- * This is the case when we are somewhere in a transaction block
- * and about to start a new command. For now we do nothing
- * but someday we may do command-local resource initialization.
+ * This is the case when we are somewhere in a transaction
+ * block and about to start a new command. For now we do
+ * nothing but someday we may do command-local resource
+ * initialization.
*/
case TBLOCK_INPROGRESS:
case TBLOCK_SUBINPROGRESS:
/*
* This shouldn't happen, because it means the previous
* StartTransactionCommand didn't set the STARTED state
- * appropriately, or we didn't manage previous pending
- * abort states.
+ * appropriately, or we didn't manage previous pending abort
+ * states.
*/
case TBLOCK_DEFAULT:
case TBLOCK_SUBABORT_PENDING:
break;
/*
- * Ditto, but in a subtransaction. AbortOutOfAnyTransaction
+ * Ditto, but in a subtransaction. AbortOutOfAnyTransaction
* will do the dirty work.
*/
case TBLOCK_SUBENDABORT_ALL:
AbortOutOfAnyTransaction();
- s = CurrentTransactionState; /* changed by AbortOutOfAnyTransaction */
+ s = CurrentTransactionState; /* changed by
+ * AbortOutOfAnyTransaction
+ * */
/* AbortOutOfAnyTransaction sets the blockState */
break;
/*
* We were just issued a SAVEPOINT inside a transaction block.
- * Start a subtransaction. (DefineSavepoint already
- * did PushTransaction, so as to have someplace to put the
+ * Start a subtransaction. (DefineSavepoint already did
+ * PushTransaction, so as to have someplace to put the
* SUBBEGIN state.)
*/
case TBLOCK_SUBBEGIN:
* We were issued a RELEASE command, so we end the current
* subtransaction and return to the parent transaction.
*
- * Since RELEASE can exit multiple levels of subtransaction,
- * we must loop here until we get out of all SUBEND'ed levels.
+ * Since RELEASE can exit multiple levels of subtransaction, we
+ * must loop here until we get out of all SUBEND'ed levels.
*/
case TBLOCK_SUBEND:
- do {
+ do
+ {
CommitSubTransaction();
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
} while (s->blockState == TBLOCK_SUBEND);
break;
break;
/*
- * The current subtransaction is ending. Do the equivalent
- * of a ROLLBACK TO followed by a RELEASE command.
+ * The current subtransaction is ending. Do the equivalent of
+ * a ROLLBACK TO followed by a RELEASE command.
*/
case TBLOCK_SUBENDABORT_RELEASE:
CleanupAbortedSubTransactions(false);
break;
/*
- * The current subtransaction is ending due to a ROLLBACK
- * TO command, so close all savepoints up to the target
- * level. When finished, recreate the savepoint.
+ * The current subtransaction is ending due to a ROLLBACK TO
+ * command, so close all savepoints up to the target level.
+ * When finished, recreate the savepoint.
*/
case TBLOCK_SUBENDABORT:
{
- char *name = CleanupAbortedSubTransactions(true);
+ char *name = CleanupAbortedSubTransactions(true);
Assert(PointerIsValid(name));
DefineSavepoint(name);
- s = CurrentTransactionState; /* changed by DefineSavepoint */
+ s = CurrentTransactionState; /* changed by
+ * DefineSavepoint */
pfree(name);
/* This is the same as TBLOCK_SUBBEGIN case */
CleanupAbortedSubTransactions(bool returnName)
{
TransactionState s = CurrentTransactionState;
- char *name = NULL;
-
+ char *name = NULL;
+
AssertState(PointerIsValid(s->parent));
Assert(s->parent->blockState == TBLOCK_SUBINPROGRESS ||
s->parent->blockState == TBLOCK_INPROGRESS ||
CleanupSubTransaction();
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
while (s->blockState == TBLOCK_SUBABORT_PENDING)
{
switch (s->blockState)
{
- /*
- * we aren't in a transaction, so we do nothing.
- */
+ /*
+ * we aren't in a transaction, so we do nothing.
+ */
case TBLOCK_DEFAULT:
break;
break;
/*
- * This is the case when we are somewhere in a transaction block
- * and we've gotten a failure, so we abort the transaction and
- * set up the persistent ABORT state. We will stay in ABORT
- * until we get an "END TRANSACTION".
+ * This is the case when we are somewhere in a transaction
+ * block and we've gotten a failure, so we abort the
+ * transaction and set up the persistent ABORT state. We will
+ * stay in ABORT until we get an "END TRANSACTION".
*/
case TBLOCK_INPROGRESS:
AbortTransaction();
break;
/*
- * If we are just starting a subtransaction, put it
- * in aborted state.
+ * If we are just starting a subtransaction, put it in aborted
+ * state.
*/
case TBLOCK_SUBBEGIN:
StartAbortedSubTransaction();
break;
/*
- * If we are aborting an ending transaction,
- * we have to abort the parent transaction too.
+ * If we are aborting an ending transaction, we have to abort
+ * the parent transaction too.
*/
case TBLOCK_SUBEND:
case TBLOCK_SUBABORT_PENDING:
PopTransaction();
s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState != TBLOCK_SUBEND &&
- s->blockState != TBLOCK_SUBENDABORT);
+ s->blockState != TBLOCK_SUBENDABORT);
AbortCurrentTransaction();
break;
PopTransaction();
s = CurrentTransactionState; /* changed by pop */
Assert(s->blockState != TBLOCK_SUBEND &&
- s->blockState != TBLOCK_SUBENDABORT);
+ s->blockState != TBLOCK_SUBENDABORT);
AbortCurrentTransaction();
break;
/*
- * We are already aborting the whole transaction tree.
- * Do nothing, CommitTransactionCommand will call
+ * We are already aborting the whole transaction tree. Do
+ * nothing, CommitTransactionCommand will call
* AbortOutOfAnyTransaction and set things straight.
*/
case TBLOCK_SUBENDABORT_ALL:
IsInTransactionChain(void *stmtNode)
{
/*
- * Return true on same conditions that would make PreventTransactionChain
- * error out
+ * Return true on same conditions that would make
+ * PreventTransactionChain error out
*/
if (IsTransactionBlock())
return true;
* (mainly because it's easier to control the order that way, where needed).
*
* At transaction end, the callback occurs post-commit or post-abort, so the
- * callback functions can only do noncritical cleanup. At subtransaction
- * start, the callback is called when the subtransaction has finished
+ * callback functions can only do noncritical cleanup. At subtransaction
+ * start, the callback is called when the subtransaction has finished
* initializing.
*/
void
XactCallbackItem *item;
for (item = Xact_callbacks; item; item = item->next)
- {
(*item->callback) (event, parentXid, item->arg);
- }
}
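The loop above hands every registered callback the event and the parent transaction's xid. For illustration, a backend module could hook noncritical end-of-transaction cleanup roughly like this (a minimal sketch: RegisterXactCallback and the XACT_EVENT_COMMIT/XACT_EVENT_ABORT values are assumed from access/xact.h of this vintage; the module names are hypothetical):

#include "postgres.h"
#include "access/xact.h"

/* sketch only: module-level pointer that must not outlive the transaction */
static void *my_module_cache = NULL;

static void
my_xact_callback(XactEvent event, TransactionId parentXid, void *arg)
{
	/* called post-commit/post-abort, so only noncritical cleanup belongs here */
	if (event == XACT_EVENT_COMMIT || event == XACT_EVENT_ABORT)
		my_module_cache = NULL;
}

static void
my_module_first_use(void)
{
	RegisterXactCallback(my_xact_callback, NULL);
}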
switch (s->blockState)
{
/*
- * We are not inside a transaction block, so allow one
- * to begin.
+ * We are not inside a transaction block, so allow one to
+ * begin.
*/
case TBLOCK_STARTED:
s->blockState = TBLOCK_BEGIN;
case TBLOCK_SUBABORT:
ereport(WARNING,
(errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("there is already a transaction in progress")));
+ errmsg("there is already a transaction in progress")));
break;
/* These cases are invalid. Reject them altogether. */
switch (s->blockState)
{
- /*
- * We are in a transaction block which should commit when we
- * get to the upcoming CommitTransactionCommand() so we set the
- * state to "END". CommitTransactionCommand() will recognize this
- * and commit the transaction and return us to the default state.
- */
+ /*
+ * We are in a transaction block which should commit when we
+ * get to the upcoming CommitTransactionCommand() so we set
+ * the state to "END". CommitTransactionCommand() will
+ * recognize this and commit the transaction and return us to
+ * the default state.
+ */
case TBLOCK_INPROGRESS:
case TBLOCK_SUBINPROGRESS:
s->blockState = TBLOCK_END;
/*
* We are in a transaction block which aborted. Since the
- * AbortTransaction() was already done, we need only
- * change to the special "END ABORT" state. The upcoming
- * CommitTransactionCommand() will recognise this and then put us
- * back in the default state.
+ * AbortTransaction() was already done, we need only change to
+ * the special "END ABORT" state. The upcoming
+ * CommitTransactionCommand() will recognise this and then put
+ * us back in the default state.
*/
case TBLOCK_ABORT:
s->blockState = TBLOCK_ENDABORT;
break;
/*
- * Here we are inside an aborted subtransaction. Go to the "abort
- * the whole tree" state so that CommitTransactionCommand() calls
- * AbortOutOfAnyTransaction.
+ * Here we are inside an aborted subtransaction. Go to the
+ * "abort the whole tree" state so that
+ * CommitTransactionCommand() calls AbortOutOfAnyTransaction.
*/
case TBLOCK_SUBABORT:
s->blockState = TBLOCK_SUBENDABORT_ALL;
break;
case TBLOCK_STARTED:
+
/*
- * here, the user issued COMMIT when not inside a
- * transaction. Issue a WARNING and go to abort state. The
- * upcoming call to CommitTransactionCommand() will then put us
- * back into the default state.
+ * here, the user issued COMMIT when not inside a transaction.
+ * Issue a WARNING and go to abort state. The upcoming call
+ * to CommitTransactionCommand() will then put us back into
+ * the default state.
*/
ereport(WARNING,
(errcode(ERRCODE_NO_ACTIVE_SQL_TRANSACTION),
break;
/*
- * We are inside a failed subtransaction and we got an
- * abort command from the user. Abort processing is already
- * done, so go to the "abort all" state and
- * CommitTransactionCommand will call AbortOutOfAnyTransaction
- * to set things straight.
+ * We are inside a failed subtransaction and we got an abort
+ * command from the user. Abort processing is already done,
+ * so go to the "abort all" state and CommitTransactionCommand
+ * will call AbortOutOfAnyTransaction to set things straight.
*/
case TBLOCK_SUBABORT:
s->blockState = TBLOCK_SUBENDABORT_ALL;
break;
/*
- * We are inside a subtransaction. Abort the current
+ * We are inside a subtransaction. Abort the current
* subtransaction and go to the "abort all" state, so
* CommitTransactionCommand will call AbortOutOfAnyTransaction
* to set things straight.
void
DefineSavepoint(char *name)
{
- TransactionState s = CurrentTransactionState;
+ TransactionState s = CurrentTransactionState;
switch (s->blockState)
{
case TBLOCK_SUBINPROGRESS:
/* Normal subtransaction start */
PushTransaction();
- s = CurrentTransactionState; /* changed by push */
+ s = CurrentTransactionState; /* changed by push */
+
/*
* Note that we are allocating the savepoint name in the
- * parent transaction's CurTransactionContext, since we
- * don't yet have a transaction context for the new guy.
+ * parent transaction's CurTransactionContext, since we don't
+ * yet have a transaction context for the new guy.
*/
s->name = MemoryContextStrdup(CurTransactionContext, name);
s->blockState = TBLOCK_SUBBEGIN;
/*
* ReleaseSavepoint
- * This executes a RELEASE command.
+ * This executes a RELEASE command.
*/
void
ReleaseSavepoint(List *options)
{
- TransactionState s = CurrentTransactionState;
+ TransactionState s = CurrentTransactionState;
TransactionState target,
- xact;
- ListCell *cell;
- char *name = NULL;
+ xact;
+ ListCell *cell;
+ char *name = NULL;
/*
* Check valid block state transaction status.
break;
/*
- * We are in a non-aborted subtransaction. This is
- * the only valid case.
+ * We are in a non-aborted subtransaction. This is the only
+ * valid case.
*/
case TBLOCK_SUBINPROGRESS:
break;
break;
}
- foreach (cell, options)
+ foreach(cell, options)
{
- DefElem *elem = lfirst(cell);
+ DefElem *elem = lfirst(cell);
if (strcmp(elem->defname, "savepoint_name") == 0)
name = strVal(elem->arg);
/*
* Mark "commit pending" all subtransactions up to the target
- * subtransaction. The actual commits will happen when control
- * gets to CommitTransactionCommand.
+ * subtransaction. The actual commits will happen when control gets
+ * to CommitTransactionCommand.
*/
xact = CurrentTransactionState;
for (;;)
/*
* RollbackToSavepoint
- * This executes a ROLLBACK TO command.
+ * This executes a ROLLBACK TO command.
*/
void
RollbackToSavepoint(List *options)
{
TransactionState s = CurrentTransactionState;
TransactionState target,
- xact;
- ListCell *cell;
- char *name = NULL;
+ xact;
+ ListCell *cell;
+ char *name = NULL;
switch (s->blockState)
{
- /*
- * We can't rollback to a savepoint if there is no saveopint
- * defined.
- */
+ /*
+ * We can't roll back to a savepoint if there is no savepoint
+ * defined.
+ */
case TBLOCK_ABORT:
case TBLOCK_INPROGRESS:
ereport(ERROR,
*/
case TBLOCK_SUBABORT:
case TBLOCK_SUBINPROGRESS:
+
/*
- * Have to do AbortSubTransaction, but first check
- * if this is the right subtransaction
+ * Have to do AbortSubTransaction, but first check if this is
+ * the right subtransaction
*/
break;
break;
}
- foreach (cell, options)
+ foreach(cell, options)
{
- DefElem *elem = lfirst(cell);
+ DefElem *elem = lfirst(cell);
if (strcmp(elem->defname, "savepoint_name") == 0)
name = strVal(elem->arg);
/*
* Mark "abort pending" all subtransactions up to the target
- * subtransaction. (Except the current subtransaction!)
+ * subtransaction. (Except the current subtransaction!)
*/
xact = CurrentTransactionState;
void
BeginInternalSubTransaction(char *name)
{
- TransactionState s = CurrentTransactionState;
+ TransactionState s = CurrentTransactionState;
switch (s->blockState)
{
case TBLOCK_SUBINPROGRESS:
/* Normal subtransaction start */
PushTransaction();
- s = CurrentTransactionState; /* changed by push */
+ s = CurrentTransactionState; /* changed by push */
+
/*
* Note that we are allocating the savepoint name in the
- * parent transaction's CurTransactionContext, since we
- * don't yet have a transaction context for the new guy.
+ * parent transaction's CurTransactionContext, since we don't
+ * yet have a transaction context for the new guy.
*/
if (name)
s->name = MemoryContextStrdup(CurTransactionContext, name);
switch (s->blockState)
{
- /* Must be in a subtransaction */
+ /* Must be in a subtransaction */
case TBLOCK_SUBABORT:
case TBLOCK_SUBINPROGRESS:
break;
/*
* Get out of any transaction or nested transaction
*/
- do {
+ do
+ {
switch (s->blockState)
{
case TBLOCK_DEFAULT:
s->blockState = TBLOCK_DEFAULT;
break;
case TBLOCK_SUBBEGIN:
+
/*
- * We didn't get as far as starting the subxact, so there's
- * nothing to abort. Just pop back to parent.
+ * We didn't get as far as starting the subxact, so
+ * there's nothing to abort. Just pop back to parent.
*/
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
break;
case TBLOCK_SUBINPROGRESS:
case TBLOCK_SUBEND:
case TBLOCK_SUBABORT_PENDING:
- /* In a subtransaction, so clean it up and abort parent too */
+
+ /*
+ * In a subtransaction, so clean it up and abort parent
+ * too
+ */
AbortSubTransaction();
CleanupSubTransaction();
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
break;
case TBLOCK_SUBABORT:
case TBLOCK_SUBENDABORT_ALL:
/* As above, but AbortSubTransaction already done */
CleanupSubTransaction();
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
break;
}
} while (s->blockState != TBLOCK_DEFAULT);
{
CommitSubTransaction();
PopTransaction();
- s = CurrentTransactionState; /* changed by pop */
+ s = CurrentTransactionState; /* changed by pop */
Assert(s->state == TRANS_INPROGRESS);
}
}
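BeginInternalSubTransaction and the release path whose tail appears above are the hooks that let server-side code run a failure-prone operation without losing the surrounding transaction. A hedged sketch of that pattern, assuming the companion entry points ReleaseCurrentSubTransaction()/RollbackAndReleaseCurrentSubTransaction() and the PG_TRY/FlushErrorState machinery from elog.h (names outside this diff are assumptions):

#include "postgres.h"
#include "access/xact.h"
#include "utils/resowner.h"

/*
 * Sketch: attempt work that may elog(ERROR), keeping the outer transaction
 * alive on failure.
 */
static bool
try_risky_operation(void)
{
	MemoryContext oldcontext = CurrentMemoryContext;
	ResourceOwner oldowner = CurrentResourceOwner;
	bool		ok = true;

	BeginInternalSubTransaction(NULL);

	PG_TRY();
	{
		/* ... the work that might fail goes here ... */
		ReleaseCurrentSubTransaction();
	}
	PG_CATCH();
	{
		/* abort only the subtransaction; the parent stays usable */
		RollbackAndReleaseCurrentSubTransaction();
		FlushErrorState();
		ok = false;
	}
	PG_END_TRY();

	/* subtransaction entry/exit switched these; restore the caller's state */
	MemoryContextSwitchTo(oldcontext);
	CurrentResourceOwner = oldowner;

	return ok;
}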
/*
* IsTransactionOrTransactionBlock --- are we within either a transaction
- * or a transaction block? (The backend is only really "idle" when this
+ * or a transaction block? (The backend is only really "idle" when this
* returns false.)
*
* This should match up with IsTransactionBlock and IsTransactionState.
/*
* Generate a new Xid and record it in pg_subtrans. NB: we must make
- * the subtrans entry BEFORE the Xid appears anywhere in shared storage,
- * such as in the lock table; because until it's made the Xid may not
- * appear to be "running" to other backends. See GetNewTransactionId.
+ * the subtrans entry BEFORE the Xid appears anywhere in shared
+ * storage, such as in the lock table; because until it's made the Xid
+ * may not appear to be "running" to other backends. See
+ * GetNewTransactionId.
*/
s->transactionIdData = GetNewTransactionId(true);
*/
s->currentUser = GetUserId();
s->prevXactReadOnly = XactReadOnly;
-
+
/*
* Initialize other subsystems for new subtransaction
*/
s->state = TRANS_INPROGRESS;
/*
- * Call start-of-subxact callbacks
+ * Call start-of-subxact callbacks
*/
CallXactCallbacks(XACT_EVENT_START_SUB, s->parent->transactionIdData);
s->parent->transactionIdData);
/*
- * We need to restore the upper transaction's read-only state,
- * in case the upper is read-write while the child is read-only;
- * GUC will incorrectly think it should leave the child state in place.
+ * We need to restore the upper transaction's read-only state, in case
+ * the upper is read-write while the child is read-only; GUC will
+ * incorrectly think it should leave the child state in place.
*/
XactReadOnly = s->prevXactReadOnly;
/*
* Reset user id which might have been changed transiently. Here we
* want to restore to the userid that was current at subxact entry.
- * (As in AbortTransaction, we need not worry about the session userid.)
+ * (As in AbortTransaction, we need not worry about the session
+ * userid.)
*
* Must do this after AtEOXact_GUC to handle the case where we entered
* the subxact inside a SECURITY DEFINER function (hence current and
* session userids were different) and then session auth was changed
- * inside the subxact. GUC will reset both current and session userids
- * to the entry-time session userid. This is right in every other
- * scenario so it seems simplest to let GUC do that and fix it here.
+ * inside the subxact. GUC will reset both current and session
+ * userids to the entry-time session userid. This is right in every
+ * other scenario so it seems simplest to let GUC do that and fix it
+ * here.
*/
SetUserId(s->currentUser);
* StartAbortedSubTransaction
*
* This function is used to start a subtransaction and put it immediately
- * into aborted state. The end result should be equivalent to
+ * into aborted state. The end result should be equivalent to
* StartSubTransaction immediately followed by AbortSubTransaction.
* The reason we don't implement it just that way is that many of the backend
* modules aren't designed to handle starting a subtransaction when not
- * inside a valid transaction. Rather than making them all capable of
+ * inside a valid transaction. Rather than making them all capable of
* doing that, we just omit the paired start and abort calls in this path.
*/
static void
/* Make sure currentUser is reasonably valid */
Assert(s->parent != NULL);
s->currentUser = s->parent->currentUser;
-
+
/*
- * Initialize only what has to be there for CleanupSubTransaction to work.
+ * Initialize only what has to be there for CleanupSubTransaction to
+ * work.
*/
AtSubStart_Memory();
AtSubStart_ResourceOwner();
static void
PushTransaction(void)
{
- TransactionState p = CurrentTransactionState;
- TransactionState s;
+ TransactionState p = CurrentTransactionState;
+ TransactionState s;
/*
* We keep subtransaction state nodes in TopTransactionContext.
/* use ereport to suppress computation if msg will not be printed */
ereport(DEBUG2,
(errmsg_internal("name: %s; blockState: %13s; state: %7s, xid/cid: %u/%02u, nestlvl: %d, children: %s",
- PointerIsValid(s->name) ? s->name : "unnamed",
+ PointerIsValid(s->name) ? s->name : "unnamed",
BlockStateAsString(s->blockState),
TransStateAsString(s->state),
(unsigned int) s->transactionIdData,
/*
* xactGetCommittedChildren
*
- * Gets the list of committed children of the current transaction. The return
+ * Gets the list of committed children of the current transaction. The return
* value is the number of child transactions. *children is set to point to a
* palloc'd array of TransactionIds. If there are no subxacts, *children is
* set to NULL.
int
xactGetCommittedChildren(TransactionId **ptr)
{
- TransactionState s = CurrentTransactionState;
- int nchildren;
- TransactionId *children;
- ListCell *p;
+ TransactionState s = CurrentTransactionState;
+ int nchildren;
+ TransactionId *children;
+ ListCell *p;
nchildren = list_length(s->childXids);
if (nchildren == 0)
if (info == XLOG_XACT_COMMIT)
{
xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);
- int i;
+ int i;
TransactionIdCommit(record->xl_xid);
/* Mark committed subtransactions as committed */
TransactionIdCommitTree(xlrec->nsubxacts,
- (TransactionId *) &(xlrec->xnodes[xlrec->nrels]));
+ (TransactionId *) &(xlrec->xnodes[xlrec->nrels]));
/* Make sure files supposed to be dropped are dropped */
for (i = 0; i < xlrec->nrels; i++)
{
else if (info == XLOG_XACT_ABORT)
{
xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record);
- int i;
+ int i;
TransactionIdAbort(record->xl_xid);
/* mark subtransactions as aborted */
TransactionIdAbortTree(xlrec->nsubxacts,
- (TransactionId *) &(xlrec->xnodes[xlrec->nrels]));
+ (TransactionId *) &(xlrec->xnodes[xlrec->nrels]));
/* Make sure files supposed to be dropped are dropped */
for (i = 0; i < xlrec->nrels; i++)
{
xact_desc(char *buf, uint8 xl_info, char *rec)
{
uint8 info = xl_info & ~XLR_INFO_MASK;
- int i;
+ int i;
if (info == XLOG_XACT_COMMIT)
{
for (i = 0; i < xlrec->nrels; i++)
{
RelFileNode rnode = xlrec->xnodes[i];
+
sprintf(buf + strlen(buf), " %u/%u/%u",
rnode.spcNode, rnode.dbNode, rnode.relNode);
}
if (xlrec->nsubxacts > 0)
{
TransactionId *xacts = (TransactionId *)
- &xlrec->xnodes[xlrec->nrels];
+ &xlrec->xnodes[xlrec->nrels];
sprintf(buf + strlen(buf), "; subxacts:");
for (i = 0; i < xlrec->nsubxacts; i++)
for (i = 0; i < xlrec->nrels; i++)
{
RelFileNode rnode = xlrec->xnodes[i];
+
sprintf(buf + strlen(buf), " %u/%u/%u",
rnode.spcNode, rnode.dbNode, rnode.relNode);
}
if (xlrec->nsubxacts > 0)
{
TransactionId *xacts = (TransactionId *)
- &xlrec->xnodes[xlrec->nrels];
+ &xlrec->xnodes[xlrec->nrels];
sprintf(buf + strlen(buf), "; subxacts:");
for (i = 0; i < xlrec->nsubxacts; i++)
}
void
-XactPushRollback(void (*func) (void *), void *data)
+ XactPushRollback(void (*func) (void *), void *data)
{
#ifdef XLOG_II
if (_RollbackFunc != NULL)
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.165 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.166 2004/08/29 05:06:40 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Are we doing recovery from XLOG? */
bool InRecovery = false;
+
/* Are we recovering using offline XLOG archives? */
-static bool InArchiveRecovery = false;
+static bool InArchiveRecovery = false;
+
/* Was the last xlog file restored from archive, or local? */
-static bool restoredFromArchive = false;
+static bool restoredFromArchive = false;
/* options taken from recovery.conf */
static char *recoveryRestoreCommand = NULL;
static bool recoveryTarget = false;
static bool recoveryTargetExact = false;
static bool recoveryTargetInclusive = true;
-static TransactionId recoveryTargetXid;
-static time_t recoveryTargetTime;
+static TransactionId recoveryTargetXid;
+static time_t recoveryTargetTime;
/* if recoveryStopsHere returns true, it saves actual stop xid/time here */
-static TransactionId recoveryStopXid;
-static time_t recoveryStopTime;
-static bool recoveryStopAfter;
+static TransactionId recoveryStopXid;
+static time_t recoveryStopTime;
+static bool recoveryStopAfter;
/* constraint set by read_backup_label */
-static XLogRecPtr recoveryMinXlogOffset = { 0, 0 };
+static XLogRecPtr recoveryMinXlogOffset = {0, 0};
/*
* During normal operation, the only timeline we care about is ThisTimeLineID.
*
* expectedTLIs: an integer list of recoveryTargetTLI and the TLIs of
* its known parents, newest first (so recoveryTargetTLI is always the
- * first list member). Only these TLIs are expected to be seen in the WAL
+ * first list member). Only these TLIs are expected to be seen in the WAL
* segments we read, and indeed only these TLIs will be considered as
* candidate WAL files to open at all.
*
* file was created.) During a sequential scan we do not allow this value
* to decrease.
*/
-static TimeLineID recoveryTargetTLI;
-static List *expectedTLIs;
-static TimeLineID curFileTLI;
+static TimeLineID recoveryTargetTLI;
+static List *expectedTLIs;
+static TimeLineID curFileTLI;
/*
* MyLastRecPtr points to the start of the last XLOG record inserted by the
/* File path names */
-char XLogDir[MAXPGPATH];
+char XLogDir[MAXPGPATH];
static char ControlFilePath[MAXPGPATH];
/*
static void XLogArchiveCleanup(const char *xlog);
static void readRecoveryCommandFile(void);
static void exitArchiveRecovery(TimeLineID endTLI,
- uint32 endLogId, uint32 endLogSeg);
+ uint32 endLogId, uint32 endLogSeg);
static bool recoveryStopsHere(XLogRecord *record, bool *includeThis);
static bool AdvanceXLInsertBuffer(void);
static int XLogFileOpen(uint32 log, uint32 seg);
static int XLogFileRead(uint32 log, uint32 seg, int emode);
static bool RestoreArchivedFile(char *path, const char *xlogfname,
- const char *recovername, off_t expectedSize);
+ const char *recovername, off_t expectedSize);
static void PreallocXlogFiles(XLogRecPtr endptr);
static void MoveOfflineLogs(uint32 log, uint32 seg, XLogRecPtr endptr);
static XLogRecord *ReadRecord(XLogRecPtr *RecPtr, int emode, char *buffer);
static bool existsTimeLineHistory(TimeLineID probeTLI);
static TimeLineID findNewestTimeLine(TimeLineID startTLI);
static void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI,
- TimeLineID endTLI,
- uint32 endLogId, uint32 endLogSeg);
+ TimeLineID endTLI,
+ uint32 endLogId, uint32 endLogSeg);
static void WriteControlFile(void);
static void ReadControlFile(void);
static char *str_time(time_t tnow);
static void issue_xlog_fsync(void);
+
#ifdef WAL_DEBUG
static void xlog_outrec(char *buf, XLogRecord *record);
#endif
if (IsBootstrapProcessingMode() && rmid != RM_XLOG_ID)
{
RecPtr.xlogid = 0;
- RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt record */
+ RecPtr.xrecoff = SizeOfXLogLongPHD; /* start of 1st chkpt
+ * record */
return (RecPtr);
}
/*
* If there isn't enough space on the current XLOG page for a record
- * header, advance to the next page (leaving the unused space as zeroes).
+ * header, advance to the next page (leaving the unused space as
+ * zeroes).
*/
updrqst = false;
freespace = INSERT_FREESPACE(Insert);
XLogArchiveNotify(const char *xlog)
{
char archiveStatusPath[MAXPGPATH];
- FILE *fd;
+ FILE *fd;
/* insert an otherwise empty file called .ready */
StatusFilePath(archiveStatusPath, xlog, ".ready");
fd = AllocateFile(archiveStatusPath, "w");
- if (fd == NULL) {
+ if (fd == NULL)
+ {
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not create archive status file \"%s\": %m",
archiveStatusPath)));
return;
}
- if (FreeFile(fd)) {
+ if (FreeFile(fd))
+ {
ereport(LOG,
(errcode_for_file_access(),
errmsg("could not write archive status file \"%s\": %m",
/*
* XLogArchiveIsDone
*
- * Checks for a ".done" archive notification file. This is called when we
+ * Checks for a ".done" archive notification file. This is called when we
* are ready to delete or recycle an old XLOG segment file. If it is okay
* to delete it then return true.
*
/* check for .ready --- this means archiver is still busy with it */
StatusFilePath(archiveStatusPath, xlog, ".ready");
if (stat(archiveStatusPath, &stat_buf) == 0)
- return false;
+ return false;
/* Race condition --- maybe archiver just finished, so recheck */
StatusFilePath(archiveStatusPath, xlog, ".done");
static void
XLogArchiveCleanup(const char *xlog)
{
- char archiveStatusPath[MAXPGPATH];
+ char archiveStatusPath[MAXPGPATH];
/* Remove the .done file */
StatusFilePath(archiveStatusPath, xlog, ".done");
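Taken together, XLogArchiveNotify, XLogArchiveIsDone and XLogArchiveCleanup define the archive handshake purely through empty status files: the backend drops <segment>.ready, an external archiver is expected to replace it with <segment>.done once the segment is safely stored, and the .done marker is what later permits recycling. A standalone sketch of the archiver's half of that contract (the status-directory path and the copy step are illustrative assumptions, not taken from this file):

#include <stdio.h>

/* sketch of an archiver flipping a .ready marker to .done after a copy */
int
archive_one_segment(const char *statusdir, const char *xlogname)
{
	char		ready[1024];
	char		done[1024];

	snprintf(ready, sizeof(ready), "%s/%s.ready", statusdir, xlogname);
	snprintf(done, sizeof(done), "%s/%s.done", statusdir, xlogname);

	/* ... copy the segment to archival storage; on failure leave .ready ... */

	/* only once the copy is durable do we signal completion */
	if (rename(ready, done) != 0)
	{
		perror("rename");
		return -1;
	}
	return 0;
}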
issue_xlog_fsync();
LogwrtResult.Flush = LogwrtResult.Write; /* end of current page */
- if (XLogArchivingActive())
- XLogArchiveNotifySeg(openLogId, openLogSeg);
+ if (XLogArchivingActive())
+ XLogArchiveNotifySeg(openLogId, openLogSeg);
}
if (ispartialpage)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return (fd);
}
* a different timeline)
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
errmsg("could not read file \"%s\": %m", path)));
else
ereport(PANIC,
- (errmsg("insufficient data in file \"%s\"", path)));
+ (errmsg("insufficient data in file \"%s\"", path)));
}
errno = 0;
if ((int) write(fd, buffer, sizeof(buffer)) != (int) sizeof(buffer))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
if (fd < 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return fd;
}
int fd;
/*
- * Loop looking for a suitable timeline ID: we might need to
- * read any of the timelines listed in expectedTLIs.
+ * Loop looking for a suitable timeline ID: we might need to read any
+ * of the timelines listed in expectedTLIs.
*
- * We expect curFileTLI on entry to be the TLI of the preceding file
- * in sequence, or 0 if there was no predecessor. We do not allow
+ * We expect curFileTLI on entry to be the TLI of the preceding file in
+ * sequence, or 0 if there was no predecessor. We do not allow
* curFileTLI to go backwards; this prevents us from picking up the
* wrong file when a parent timeline extends to higher segment numbers
* than the child we want to read.
errno = ENOENT;
ereport(emode,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
- path, log, seg)));
+ errmsg("could not open file \"%s\" (log file %u, segment %u): %m",
+ path, log, seg)));
return -1;
}
RestoreArchivedFile(char *path, const char *xlogfname,
const char *recovername, off_t expectedSize)
{
- char xlogpath[MAXPGPATH];
- char xlogRestoreCmd[MAXPGPATH];
- char *dp;
- char *endp;
+ char xlogpath[MAXPGPATH];
+ char xlogRestoreCmd[MAXPGPATH];
+ char *dp;
+ char *endp;
const char *sp;
- int rc;
+ int rc;
struct stat stat_buf;
/*
* When doing archive recovery, we always prefer an archived log file
* even if a file of the same name exists in XLogDir. The reason is
- * that the file in XLogDir could be an old, un-filled or partly-filled
- * version that was copied and restored as part of backing up $PGDATA.
+ * that the file in XLogDir could be an old, un-filled or
+ * partly-filled version that was copied and restored as part of
+ * backing up $PGDATA.
*
- * We could try to optimize this slightly by checking the local
- * copy lastchange timestamp against the archived copy,
- * but we have no API to do this, nor can we guarantee that the
- * lastchange timestamp was preserved correctly when we copied
- * to archive. Our aim is robustness, so we elect not to do this.
+ * We could try to optimize this slightly by checking the local copy
+ * lastchange timestamp against the archived copy, but we have no API
+ * to do this, nor can we guarantee that the lastchange timestamp was
+ * preserved correctly when we copied to archive. Our aim is
+ * robustness, so we elect not to do this.
*
- * If we cannot obtain the log file from the archive, however, we
- * will try to use the XLogDir file if it exists. This is so that
- * we can make use of log segments that weren't yet transferred to
- * the archive.
+ * If we cannot obtain the log file from the archive, however, we will
+ * try to use the XLogDir file if it exists. This is so that we can
+ * make use of log segments that weren't yet transferred to the
+ * archive.
*
* Notice that we don't actually overwrite any files when we copy back
* from archive because the recoveryRestoreCommand may inadvertently
- * restore inappropriate xlogs, or they may be corrupt, so we may
- * wish to fallback to the segments remaining in current XLogDir later.
- * The copy-from-archive filename is always the same, ensuring that we
+ * restore inappropriate xlogs, or they may be corrupt, so we may wish
+ * to fall back to the segments remaining in current XLogDir later. The
+ * copy-from-archive filename is always the same, ensuring that we
* don't run out of disk space on long recoveries.
*/
snprintf(xlogpath, MAXPGPATH, "%s/%s", XLogDir, recovername);
case 'p':
/* %p: full path of target file */
sp++;
- StrNCpy(dp, xlogpath, endp-dp);
+ StrNCpy(dp, xlogpath, endp - dp);
make_native_path(dp);
dp += strlen(dp);
break;
case 'f':
/* %f: filename of desired file */
sp++;
- StrNCpy(dp, xlogfname, endp-dp);
+ StrNCpy(dp, xlogfname, endp - dp);
dp += strlen(dp);
break;
case '%':
*dp = '\0';
ereport(DEBUG3,
- (errmsg_internal("executing restore command \"%s\"",
+ (errmsg_internal("executing restore command \"%s\"",
xlogRestoreCmd)));
/*
* command apparently succeeded, but let's make sure the file is
* really there now and has the correct size.
*
- * XXX I made wrong-size a fatal error to ensure the DBA would
- * notice it, but is that too strong? We could try to plow ahead
- * with a local copy of the file ... but the problem is that there
+ * XXX I made wrong-size a fatal error to ensure the DBA would notice
+ * it, but is that too strong? We could try to plow ahead with a
+ * local copy of the file ... but the problem is that there
* probably isn't one, and we'd incorrectly conclude we've reached
* the end of WAL and we're done recovering ...
*/
}
/*
- * remember, we rollforward UNTIL the restore fails
- * so failure here is just part of the process...
- * that makes it difficult to determine whether the restore
- * failed because there isn't an archive to restore, or
- * because the administrator has specified the restore
+ * remember, we rollforward UNTIL the restore fails so failure here is
+ * just part of the process... that makes it difficult to determine
+ * whether the restore failed because there isn't an archive to
+ * restore, or because the administrator has specified the restore
* program incorrectly. We have to assume the former.
*/
ereport(DEBUG1,
- (errmsg("could not restore \"%s\" from archive: return code %d",
- xlogfname, rc)));
+ (errmsg("could not restore \"%s\" from archive: return code %d",
+ xlogfname, rc)));
/*
- * if an archived file is not available, there might still be a version
- * of this file in XLogDir, so return that as the filename to open.
+ * if an archived file is not available, there might still be a
+ * version of this file in XLogDir, so return that as the filename to
+ * open.
*
- * In many recovery scenarios we expect this to fail also, but
- * if so that just means we've reached the end of WAL.
+ * In many recovery scenarios we expect this to fail also, but if so that
+ * just means we've reached the end of WAL.
*/
snprintf(path, MAXPGPATH, "%s/%s", XLogDir, xlogfname);
return false;
{
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
+ * deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
* more complicated.
*
- * We use the alphanumeric sorting property of the filenames to decide
- * which ones are earlier than the lastoff segment.
+ * We use the alphanumeric sorting property of the filenames to
+ * decide which ones are earlier than the lastoff segment.
*/
if (strlen(xlde->d_name) == 24 &&
strspn(xlde->d_name, "0123456789ABCDEF") == 24 &&
strcmp(xlde->d_name + 8, lastoff + 8) <= 0)
{
- bool recycle;
+ bool recycle;
if (XLogArchivingActive())
recycle = XLogArchiveIsDone(xlde->d_name);
- else
+ else
recycle = true;
if (recycle)
{
/* No need for any more future segments... */
ereport(LOG,
- (errmsg("removing transaction log file \"%s\"",
- xlde->d_name)));
+ (errmsg("removing transaction log file \"%s\"",
+ xlde->d_name)));
unlink(path);
}
errno = 0;
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
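The 24-character filename test earlier in this routine works because a segment name is three 8-hex-digit fields: timeline, log id, segment number. Comparing from offset 8 therefore ignores the timeline while still ordering files by WAL position. A small standalone illustration (the %08X%08X%08X layout is inferred from that 24-character check, so treat it as a sketch):

#include <stdio.h>
#include <string.h>

int
main(void)
{
	char		a[25];
	char		b[25];

	/* timeline 1, log 0x0000000A, segment 0x00000003 */
	snprintf(a, sizeof(a), "%08X%08X%08X", 1, 0x0A, 0x03);
	/* timeline 2 (a child), same WAL position */
	snprintf(b, sizeof(b), "%08X%08X%08X", 2, 0x0A, 0x03);

	/* names differ, but the position part (offset 8 onward) compares equal */
	printf("%d %d\n", strcmp(a, b) != 0, strcmp(a + 8, b + 8) == 0);
	return 0;
}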
if (!EQ_CRC64(record->xl_crc, crc))
{
ereport(emode,
- (errmsg("incorrect resource manager data checksum in record at %X/%X",
- recptr.xlogid, recptr.xrecoff)));
+ (errmsg("incorrect resource manager data checksum in record at %X/%X",
+ recptr.xlogid, recptr.xrecoff)));
return (false);
}
if (!EQ_CRC64(cbuf, crc))
{
ereport(emode,
- (errmsg("incorrect checksum of backup block %d in record at %X/%X",
- i + 1, recptr.xlogid, recptr.xrecoff)));
+ (errmsg("incorrect checksum of backup block %d in record at %X/%X",
+ i + 1, recptr.xlogid, recptr.xrecoff)));
return (false);
}
blk += sizeof(BkpBlock) + BLCKSZ;
ereport(PANIC,
(errmsg("invalid record offset at %X/%X",
RecPtr->xlogid, RecPtr->xrecoff)));
+
/*
* Since we are going to a random position in WAL, forget any
- * prior state about what timeline we were in, and allow it
- * to be any timeline in expectedTLIs. We also set a flag to
- * allow curFileTLI to go backwards (but we can't reset that
- * variable right here, since we might not change files at all).
+ * prior state about what timeline we were in, and allow it to be
+ * any timeline in expectedTLIs. We also set a flag to allow
+ * curFileTLI to go backwards (but we can't reset that variable
+ * right here, since we might not change files at all).
*/
lastPageTLI = 0; /* see comment in ValidXLOGHeader */
randAccess = true; /* allow curFileTLI to go backwards too */
if (targetRecOff == 0)
{
/*
- * Can only get here in the continuing-from-prev-page case, because
- * XRecOffIsValid eliminated the zero-page-offset case otherwise.
- * Need to skip over the new page's header.
+ * Can only get here in the continuing-from-prev-page case,
+ * because XRecOffIsValid eliminated the zero-page-offset case
+ * otherwise. Need to skip over the new page's header.
*/
tmpRecPtr.xrecoff += pageHeaderSize;
targetRecOff = pageHeaderSize;
ControlFile->system_identifier);
ereport(emode,
(errmsg("WAL file is from different system"),
- errdetail("WAL file SYSID is %s, pg_control SYSID is %s",
- fhdrident_str, sysident_str)));
+ errdetail("WAL file SYSID is %s, pg_control SYSID is %s",
+ fhdrident_str, sysident_str)));
return false;
}
if (longhdr->xlp_seg_size != XLogSegSize)
{
ereport(emode,
(errmsg("WAL file is from different system"),
- errdetail("Incorrect XLOG_SEG_SIZE in page header.")));
+ errdetail("Incorrect XLOG_SEG_SIZE in page header.")));
return false;
}
}
* immediate parent's TLI, we should never see TLI go backwards across
* successive pages of a consistent WAL sequence.
*
- * Of course this check should only be applied when advancing sequentially
- * across pages; therefore ReadRecord resets lastPageTLI to zero when
- * going to a random page.
+ * Of course this check should only be applied when advancing
+ * sequentially across pages; therefore ReadRecord resets lastPageTLI
+ * to zero when going to a random page.
*/
if (hdr->xlp_tli < lastPageTLI)
{
* Try to read a timeline's history file.
*
* If successful, return the list of component TLIs (the given TLI followed by
- * its ancestor TLIs). If we can't find the history file, assume that the
+ * its ancestor TLIs). If we can't find the history file, assume that the
* timeline has no parents, and return a list of just the specified timeline
* ID.
*/
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
char fline[MAXPGPATH];
- FILE *fd;
+ FILE *fd;
if (InArchiveRecovery)
{
else
TLHistoryFilePath(path, targetTLI);
- fd = AllocateFile(path, "r");
+ fd = AllocateFile(path, "r");
if (fd == NULL)
{
if (errno != ENOENT)
result = NIL;
- /*
- * Parse the file...
- */
- while (fgets(fline, MAXPGPATH, fd) != NULL)
+ /*
+ * Parse the file...
+ */
+ while (fgets(fline, MAXPGPATH, fd) != NULL)
{
/* skip leading whitespace and check for # comment */
- char *ptr;
- char *endptr;
- TimeLineID tli;
+ char *ptr;
+ char *endptr;
+ TimeLineID tli;
for (ptr = fline; *ptr; ptr++)
{
tli <= (TimeLineID) linitial_int(result))
ereport(FATAL,
(errmsg("invalid data in history file: %s", fline),
- errhint("Timeline IDs must be in increasing sequence.")));
+ errhint("Timeline IDs must be in increasing sequence.")));
/* Build list with newest item first */
result = lcons_int((int) tli, result);
targetTLI <= (TimeLineID) linitial_int(result))
ereport(FATAL,
(errmsg("invalid data in history file \"%s\"", path),
- errhint("Timeline IDs must be less than child timeline's ID.")));
+ errhint("Timeline IDs must be less than child timeline's ID.")));
result = lcons_int((int) targetTLI, result);
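For reference, the parser above uses only the leading timeline ID of each non-comment line and insists the IDs increase from one line to the next; anything after the number is free-form annotation. A hypothetical history file for timeline 3 could therefore look like this (the annotation text is purely illustrative):

# ancestors of this timeline, oldest first; only the leading number is parsed
1	(annotation recorded when timeline 2 branched off timeline 1)
2	(annotation recorded when timeline 3 branched off timeline 2)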
{
char path[MAXPGPATH];
char histfname[MAXFNAMELEN];
- FILE *fd;
+ FILE *fd;
if (InArchiveRecovery)
{
TimeLineID probeTLI;
/*
- * The algorithm is just to probe for the existence of timeline history
- * files. XXX is it useful to allow gaps in the sequence?
+ * The algorithm is just to probe for the existence of timeline
+ * history files. XXX is it useful to allow gaps in the sequence?
*/
newestTLI = startTLI;
- for (probeTLI = startTLI + 1; ; probeTLI++)
+ for (probeTLI = startTLI + 1;; probeTLI++)
{
if (existsTimeLineHistory(probeTLI))
{
* endTLI et al: ID of the last used WAL file, for annotation purposes
*
* Currently this is only used during recovery, and so there are no locking
- * considerations. But we should be just as tense as XLogFileInit to avoid
+ * considerations. But we should be just as tense as XLogFileInit to avoid
* emplacing a bogus file.
*/
static void
int fd;
int nbytes;
- Assert(newTLI > parentTLI); /* else bad selection of newTLI */
+ Assert(newTLI > parentTLI); /* else bad selection of newTLI */
/*
* Write into a temp file name.
* space
*/
unlink(tmppath);
- /* if write didn't set errno, assume problem is no disk space */
+
+ /*
+ * if write didn't set errno, assume problem is no disk
+ * space
+ */
errno = save_errno ? save_errno : ENOSPC;
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not write to file \"%s\": %m", tmppath)));
+ errmsg("could not write to file \"%s\": %m", tmppath)));
}
}
close(srcfd);
/*
* Append one line with the details of this timeline split.
*
- * If we did have a parent file, insert an extra newline just in case
- * the parent file failed to end with one.
+ * If we did have a parent file, insert an extra newline just in case the
+ * parent file failed to end with one.
*/
XLogFileName(xlogfname, endTLI, endLogId, endLogSeg);
int save_errno = errno;
/*
- * If we fail to make the file, delete it to release disk
- * space
+ * If we fail to make the file, delete it to release disk space
*/
unlink(tmppath);
/* if write didn't set errno, assume problem is no disk space */
ereport(FATAL,
(errmsg("database files are incompatible with server"),
errdetail("The database cluster was initialized with XLOG_SEG_SIZE %d,"
- " but the server was compiled with XLOG_SEG_SIZE %d.",
+ " but the server was compiled with XLOG_SEG_SIZE %d.",
ControlFile->xlog_seg_size, XLOG_SEG_SIZE),
errhint("It looks like you need to recompile or initdb.")));
if (ControlFile->nameDataLen != NAMEDATALEN)
void
XLOGShmemInit(void)
{
- bool foundXLog, foundCFile;
+ bool foundXLog,
+ foundCFile;
/* this must agree with space requested by XLOGShmemSize() */
if (XLOGbuffers < MinXLOGbuffers)
crc64 crc;
/*
- * Select a hopefully-unique system identifier code for this installation.
- * We use the result of gettimeofday(), including the fractional seconds
- * field, as being about as unique as we can easily get. (Think not to
- * use random(), since it hasn't been seeded and there's no portable way
- * to seed it other than the system clock value...) The upper half of the
- * uint64 value is just the tv_sec part, while the lower half is the XOR
- * of tv_sec and tv_usec. This is to ensure that we don't lose uniqueness
- * unnecessarily if "uint64" is really only 32 bits wide. A person
- * knowing this encoding can determine the initialization time of the
- * installation, which could perhaps be useful sometimes.
+ * Select a hopefully-unique system identifier code for this
+ * installation. We use the result of gettimeofday(), including the
+ * fractional seconds field, as being about as unique as we can easily
+ * get. (Think not to use random(), since it hasn't been seeded and
+ * there's no portable way to seed it other than the system clock
+ * value...) The upper half of the uint64 value is just the tv_sec
+ * part, while the lower half is the XOR of tv_sec and tv_usec. This
+ * is to ensure that we don't lose uniqueness unnecessarily if
+ * "uint64" is really only 32 bits wide. A person knowing this
+ * encoding can determine the initialization time of the installation,
+ * which could perhaps be useful sometimes.
*/
gettimeofday(&tv, NULL);
sysidentifier = ((uint64) tv.tv_sec) << 32;
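Following the comment, the identifier packs one gettimeofday() result into 64 bits: tv_sec in the high half, tv_sec XOR tv_usec in the low half. Only the first assignment is visible in this hunk; the second line below is reconstructed from the comment's description, so read it as an illustration rather than the exact source:

#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

int
main(void)
{
	struct timeval tv;
	uint64_t	sysid;

	gettimeofday(&tv, NULL);
	sysid = ((uint64_t) tv.tv_sec) << 32;			/* high half: seconds */
	sysid |= (uint32_t) (tv.tv_sec ^ tv.tv_usec);	/* low half: XOR, per the comment */

	printf("system identifier: %llu\n", (unsigned long long) sysid);
	return 0;
}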
errno = ENOSPC;
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not write bootstrap transaction log file: %m")));
+ errmsg("could not write bootstrap transaction log file: %m")));
}
if (pg_fsync(openLogFile) != 0)
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not fsync bootstrap transaction log file: %m")));
+ errmsg("could not fsync bootstrap transaction log file: %m")));
if (close(openLogFile))
ereport(PANIC,
(errcode_for_file_access(),
- errmsg("could not close bootstrap transaction log file: %m")));
+ errmsg("could not close bootstrap transaction log file: %m")));
openLogFile = -1;
static void
readRecoveryCommandFile(void)
{
- char recoveryCommandFile[MAXPGPATH];
- FILE *fd;
- char cmdline[MAXPGPATH];
- TimeLineID rtli = 0;
- bool rtliGiven = false;
- bool syntaxError = false;
-
- snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir);
- fd = AllocateFile(recoveryCommandFile, "r");
+ char recoveryCommandFile[MAXPGPATH];
+ FILE *fd;
+ char cmdline[MAXPGPATH];
+ TimeLineID rtli = 0;
+ bool rtliGiven = false;
+ bool syntaxError = false;
+
+ snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir);
+ fd = AllocateFile(recoveryCommandFile, "r");
if (fd == NULL)
{
if (errno == ENOENT)
return; /* not there, so no archive recovery */
ereport(FATAL,
- (errcode_for_file_access(),
+ (errcode_for_file_access(),
errmsg("could not open recovery command file \"%s\": %m",
recoveryCommandFile)));
}
ereport(LOG,
- (errmsg("starting archive recovery")));
+ (errmsg("starting archive recovery")));
- /*
- * Parse the file...
- */
- while (fgets(cmdline, MAXPGPATH, fd) != NULL)
+ /*
+ * Parse the file...
+ */
+ while (fgets(cmdline, MAXPGPATH, fd) != NULL)
{
/* skip leading whitespace and check for # comment */
- char *ptr;
- char *tok1;
- char *tok2;
+ char *ptr;
+ char *tok1;
+ char *tok2;
for (ptr = cmdline; *ptr; ptr++)
{
continue;
/* identify the quoted parameter value */
- tok1 = strtok(ptr, "'");
+ tok1 = strtok(ptr, "'");
if (!tok1)
{
syntaxError = true;
break;
}
- tok2 = strtok(NULL, "'");
+ tok2 = strtok(NULL, "'");
if (!tok2)
{
syntaxError = true;
break;
}
- if (strcmp(tok1,"restore_command") == 0) {
+ if (strcmp(tok1, "restore_command") == 0)
+ {
recoveryRestoreCommand = pstrdup(tok2);
ereport(LOG,
(errmsg("restore_command = \"%s\"",
recoveryRestoreCommand)));
}
- else if (strcmp(tok1,"recovery_target_timeline") == 0) {
+ else if (strcmp(tok1, "recovery_target_timeline") == 0)
+ {
rtliGiven = true;
if (strcmp(tok2, "latest") == 0)
rtli = 0;
ereport(LOG,
(errmsg("recovery_target_timeline = latest")));
}
- else if (strcmp(tok1,"recovery_target_xid") == 0) {
+ else if (strcmp(tok1, "recovery_target_xid") == 0)
+ {
errno = 0;
recoveryTargetXid = (TransactionId) strtoul(tok2, NULL, 0);
if (errno == EINVAL || errno == ERANGE)
recoveryTarget = true;
recoveryTargetExact = true;
}
- else if (strcmp(tok1,"recovery_target_time") == 0) {
+ else if (strcmp(tok1, "recovery_target_time") == 0)
+ {
/*
* if recovery_target_xid specified, then this overrides
* recovery_target_time
continue;
recoveryTarget = true;
recoveryTargetExact = false;
+
/*
- * Convert the time string given by the user to the time_t format.
- * We use type abstime's input converter because we know abstime
- * has the same representation as time_t.
+ * Convert the time string given by the user to the time_t
+ * format. We use type abstime's input converter because we
+ * know abstime has the same representation as time_t.
*/
recoveryTargetTime = (time_t)
DatumGetAbsoluteTime(DirectFunctionCall1(abstimein,
- CStringGetDatum(tok2)));
+ CStringGetDatum(tok2)));
ereport(LOG,
(errmsg("recovery_target_time = %s",
- DatumGetCString(DirectFunctionCall1(abstimeout,
- AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime))))));
+ DatumGetCString(DirectFunctionCall1(abstimeout,
+ AbsoluteTimeGetDatum((AbsoluteTime) recoveryTargetTime))))));
}
- else if (strcmp(tok1,"recovery_target_inclusive") == 0) {
+ else if (strcmp(tok1, "recovery_target_inclusive") == 0)
+ {
/*
* does nothing if a recovery_target is not also set
*/
FreeFile(fd);
- if (syntaxError)
- ereport(FATAL,
+ if (syntaxError)
+ ereport(FATAL,
(errmsg("syntax error in recovery command file: %s",
cmdline),
- errhint("Lines should have the format parameter = 'value'.")));
+ errhint("Lines should have the format parameter = 'value'.")));
/* Check that required parameters were supplied */
if (recoveryRestoreCommand == NULL)
InArchiveRecovery = true;
/*
- * If user specified recovery_target_timeline, validate it or compute the
- * "latest" value. We can't do this until after we've gotten the restore
- * command and set InArchiveRecovery, because we need to fetch timeline
- * history files from the archive.
+ * If user specified recovery_target_timeline, validate it or compute
+ * the "latest" value. We can't do this until after we've gotten the
+ * restore command and set InArchiveRecovery, because we need to fetch
+ * timeline history files from the archive.
*/
if (rtliGiven)
{
/* Timeline 1 does not have a history file, all else should */
if (rtli != 1 && !existsTimeLineHistory(rtli))
ereport(FATAL,
- (errmsg("recovery_target_timeline %u does not exist",
- rtli)));
+ (errmsg("recovery_target_timeline %u does not exist",
+ rtli)));
recoveryTargetTLI = rtli;
}
else
static void
exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
{
- char recoveryPath[MAXPGPATH];
- char xlogpath[MAXPGPATH];
- char recoveryCommandFile[MAXPGPATH];
- char recoveryCommandDone[MAXPGPATH];
+ char recoveryPath[MAXPGPATH];
+ char xlogpath[MAXPGPATH];
+ char recoveryCommandFile[MAXPGPATH];
+ char recoveryCommandDone[MAXPGPATH];
/*
* We are no longer in archive recovery state.
InArchiveRecovery = false;
/*
- * We should have the ending log segment currently open. Verify,
- * and then close it (to avoid problems on Windows with trying to
- * rename or delete an open file).
+ * We should have the ending log segment currently open. Verify, and
+ * then close it (to avoid problems on Windows with trying to rename
+ * or delete an open file).
*/
Assert(readFile >= 0);
Assert(readId == endLogId);
readFile = -1;
/*
- * If the segment was fetched from archival storage, we want to replace
- * the existing xlog segment (if any) with the archival version. This
- * is because whatever is in XLogDir is very possibly older than what
- * we have from the archives, since it could have come from restoring
- * a PGDATA backup. In any case, the archival version certainly is
- * more descriptive of what our current database state is, because that
- * is what we replayed from.
+ * If the segment was fetched from archival storage, we want to
+ * replace the existing xlog segment (if any) with the archival
+ * version. This is because whatever is in XLogDir is very possibly
+ * older than what we have from the archives, since it could have come
+ * from restoring a PGDATA backup. In any case, the archival version
+ * certainly is more descriptive of what our current database state
+ * is, because that is what we replayed from.
*
* Note that if we are establishing a new timeline, ThisTimeLineID is
- * already set to the new value, and so we will create a new file instead
- * of overwriting any existing file.
+ * already set to the new value, and so we will create a new file
+ * instead of overwriting any existing file.
*/
snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYXLOG", XLogDir);
XLogFilePath(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
* RECOVERYXLOG laying about, get rid of it.
*/
unlink(recoveryPath); /* ignore any error */
+
/*
* If we are establishing a new timeline, we have to copy data
* from the last WAL segment of the old timeline to create a
}
/*
- * Let's just make real sure there are not .ready or .done flags posted
- * for the new segment.
+ * Let's just make real sure there are not .ready or .done flags
+ * posted for the new segment.
*/
XLogFileName(xlogpath, ThisTimeLineID, endLogId, endLogSeg);
XLogArchiveCleanup(xlogpath);
/* Get rid of any remaining recovered timeline-history file, too */
snprintf(recoveryPath, MAXPGPATH, "%s/RECOVERYHISTORY", XLogDir);
- unlink(recoveryPath); /* ignore any error */
+ unlink(recoveryPath); /* ignore any error */
/*
- * Rename the config file out of the way, so that we don't accidentally
- * re-enter archive recovery mode in a subsequent crash.
+ * Rename the config file out of the way, so that we don't
+ * accidentally re-enter archive recovery mode in a subsequent crash.
*/
- snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir);
- snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir);
+ snprintf(recoveryCommandFile, MAXPGPATH, "%s/recovery.conf", DataDir);
+ snprintf(recoveryCommandDone, MAXPGPATH, "%s/recovery.done", DataDir);
unlink(recoveryCommandDone);
if (rename(recoveryCommandFile, recoveryCommandDone) != 0)
ereport(FATAL,
recoveryStopsHere(XLogRecord *record, bool *includeThis)
{
bool stopsHere;
- uint8 record_info;
- time_t recordXtime;
+ uint8 record_info;
+ time_t recordXtime;
/* Do we have a PITR target at all? */
if (!recoveryTarget)
record_info = record->xl_info & ~XLR_INFO_MASK;
if (record_info == XLOG_XACT_COMMIT)
{
- xl_xact_commit *recordXactCommitData;
+ xl_xact_commit *recordXactCommitData;
recordXactCommitData = (xl_xact_commit *) XLogRecGetData(record);
recordXtime = recordXactCommitData->xtime;
}
else if (record_info == XLOG_XACT_ABORT)
{
- xl_xact_abort *recordXactAbortData;
+ xl_xact_abort *recordXactAbortData;
recordXactAbortData = (xl_xact_abort *) XLogRecGetData(record);
recordXtime = recordXactAbortData->xtime;
if (recoveryTargetExact)
{
/*
- * there can be only one transaction end record
- * with this exact transactionid
+ * there can be only one transaction end record with this exact
+ * transactionid
*
- * when testing for an xid, we MUST test for
- * equality only, since transactions are numbered
- * in the order they start, not the order they
- * complete. A higher numbered xid will complete
- * before you about 50% of the time...
+ * when testing for an xid, we MUST test for equality only, since
+ * transactions are numbered in the order they start, not the
+ * order they complete. A higher numbered xid will complete before
+ * you about 50% of the time...
*/
stopsHere = (record->xl_xid == recoveryTargetXid);
if (stopsHere)
else
{
/*
- * there can be many transactions that
- * share the same commit time, so
- * we stop after the last one, if we are
- * inclusive, or stop at the first one
- * if we are exclusive
+ * there can be many transactions that share the same commit time,
+ * so we stop after the last one, if we are inclusive, or stop at
+ * the first one if we are exclusive
*/
if (recoveryTargetInclusive)
stopsHere = (recordXtime > recoveryTargetTime);
if (recoveryStopAfter)
ereport(LOG,
(errmsg("recovery stopping after commit of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
else
ereport(LOG,
(errmsg("recovery stopping before commit of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
}
else
{
if (recoveryStopAfter)
ereport(LOG,
(errmsg("recovery stopping after abort of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
else
ereport(LOG,
(errmsg("recovery stopping before abort of transaction %u, time %s",
- recoveryStopXid, str_time(recoveryStopTime))));
+ recoveryStopXid, str_time(recoveryStopTime))));
}
}
#endif
/*
- * Initialize on the assumption we want to recover to the same timeline
- * that's active according to pg_control.
+ * Initialize on the assumption we want to recover to the same
+ * timeline that's active according to pg_control.
*/
recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID;
/*
- * Check for recovery control file, and if so set up state for
- * offline recovery
+ * Check for recovery control file, and if so set up state for offline
+ * recovery
*/
readRecoveryCommandFile();
* timeline.
*/
if (!list_member_int(expectedTLIs,
- (int) ControlFile->checkPointCopy.ThisTimeLineID))
+ (int) ControlFile->checkPointCopy.ThisTimeLineID))
ereport(FATAL,
(errmsg("requested timeline %u is not a child of database system timeline %u",
recoveryTargetTLI,
if (read_backup_label(&checkPointLoc))
{
/*
- * When a backup_label file is present, we want to roll forward from
- * the checkpoint it identifies, rather than using pg_control.
+ * When a backup_label file is present, we want to roll forward
+ * from the checkpoint it identifies, rather than using
+ * pg_control.
*/
record = ReadCheckpointRecord(checkPointLoc, 0, buffer);
if (record != NULL)
{
ereport(LOG,
(errmsg("checkpoint record is at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
InRecovery = true; /* force recovery even if SHUTDOWNED */
}
else
{
ereport(PANIC,
- (errmsg("could not locate required checkpoint record"),
- errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label.")));
+ (errmsg("could not locate required checkpoint record"),
+ errhint("If you are not restoring from a backup, try removing $PGDATA/backup_label.")));
}
}
else
{
/*
- * Get the last valid checkpoint record. If the latest one according
- * to pg_control is broken, try the next-to-last one.
+ * Get the last valid checkpoint record. If the latest one
+ * according to pg_control is broken, try the next-to-last one.
*/
checkPointLoc = ControlFile->checkPoint;
record = ReadCheckpointRecord(checkPointLoc, 1, buffer);
{
ereport(LOG,
(errmsg("checkpoint record is at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
}
else
{
if (record != NULL)
{
ereport(LOG,
- (errmsg("using previous checkpoint record at %X/%X",
- checkPointLoc.xlogid, checkPointLoc.xrecoff)));
- InRecovery = true; /* force recovery even if SHUTDOWNED */
+ (errmsg("using previous checkpoint record at %X/%X",
+ checkPointLoc.xlogid, checkPointLoc.xrecoff)));
+ InRecovery = true; /* force recovery even if
+ * SHUTDOWNED */
}
else
ereport(PANIC,
- (errmsg("could not locate a valid checkpoint record")));
+ (errmsg("could not locate a valid checkpoint record")));
}
}
ShmemVariableCache->oidCount = 0;
/*
- * We must replay WAL entries using the same TimeLineID they were created
- * under, so temporarily adopt the TLI indicated by the checkpoint (see
- * also xlog_redo()).
+ * We must replay WAL entries using the same TimeLineID they were
+ * created under, so temporarily adopt the TLI indicated by the
+ * checkpoint (see also xlog_redo()).
*/
ThisTimeLineID = checkPoint.ThisTimeLineID;
checkPoint.undo = RecPtr;
/*
- * Check whether we need to force recovery from WAL. If it appears
- * to have been a clean shutdown and we did not have a recovery.conf
+ * Check whether we need to force recovery from WAL. If it appears to
+ * have been a clean shutdown and we did not have a recovery.conf
* file, then assume no recovery needed.
*/
if (XLByteLT(checkPoint.undo, RecPtr) ||
*/
if (recoveryStopsHere(record, &recoveryApply))
{
- needNewTimeLine = true; /* see below */
+ needNewTimeLine = true; /* see below */
recoveryContinue = false;
if (!recoveryApply)
break;
record = ReadRecord(NULL, LOG, buffer);
} while (record != NULL && recoveryContinue);
+
/*
* end of main redo apply loop
*/
if (needNewTimeLine) /* stopped because of stop request */
ereport(FATAL,
(errmsg("requested recovery stop point is before end time of backup dump")));
- else /* ran off end of WAL */
+ else /* ran off end of WAL */
ereport(FATAL,
(errmsg("WAL ends before end time of backup dump")));
}
/*
* Consider whether we need to assign a new timeline ID.
*
- * If we stopped short of the end of WAL during recovery, then we
- * are generating a new timeline and must assign it a unique new ID.
- * Otherwise, we can just extend the timeline we were in when we
- * ran out of WAL.
+ * If we stopped short of the end of WAL during recovery, then we are
+ * generating a new timeline and must assign it a unique new ID.
+ * Otherwise, we can just extend the timeline we were in when we ran
+ * out of WAL.
*/
if (needNewTimeLine)
{
XLogCtl->ThisTimeLineID = ThisTimeLineID;
/*
- * We are now done reading the old WAL. Turn off archive fetching
- * if it was active, and make a writable copy of the last WAL segment.
+ * We are now done reading the old WAL. Turn off archive fetching if
+ * it was active, and make a writable copy of the last WAL segment.
* (Note that we also have a copy of the last block of the old WAL in
* readBuf; we will use that below.)
*/
* XLogWrite()).
*
* Note: it might seem we should do AdvanceXLInsertBuffer() here, but
- * this is sufficient. The first actual attempt to insert a log
+ * this is sufficient. The first actual attempt to insert a log
* record will advance the insert state.
*/
XLogCtl->Write.curridx = NextBufIdx(0);
XLogCloseRelationCache();
/*
- * Now that we've checkpointed the recovery, it's safe to
- * flush old backup_label, if present.
+ * Now that we've checkpointed the recovery, it's safe to flush
+ * old backup_label, if present.
*/
remove_backup_label();
}
break;
default:
ereport(LOG,
- (errmsg("invalid checkpoint link in backup_label file")));
+ (errmsg("invalid checkpoint link in backup_label file")));
break;
}
return NULL;
{
case 1:
ereport(LOG,
- (errmsg("invalid xl_info in primary checkpoint record")));
+ (errmsg("invalid xl_info in primary checkpoint record")));
break;
case 2:
ereport(LOG,
{
case 1:
ereport(LOG,
- (errmsg("invalid length of primary checkpoint record")));
+ (errmsg("invalid length of primary checkpoint record")));
break;
case 2:
ereport(LOG,
* so there's a risk of deadlock. Need to find a better solution. See
* pgsql-hackers discussion of 17-Dec-01.
*
- * XXX actually, the whole UNDO code is dead code and unlikely to ever
- * be revived, so the lack of a good solution here is not troubling.
+ * XXX actually, the whole UNDO code is dead code and unlikely to ever be
+ * revived, so the lack of a good solution here is not troubling.
*/
#ifdef NOT_USED
checkPoint.undo = GetUndoRecPtr();
PreallocXlogFiles(recptr);
/*
- * Truncate pg_subtrans if possible. We can throw away all data before
- * the oldest XMIN of any running transaction. No future transaction will
- * attempt to reference any pg_subtrans entry older than that (see Asserts
- * in subtrans.c). During recovery, though, we mustn't do this because
- * StartupSUBTRANS hasn't been called yet.
+ * Truncate pg_subtrans if possible. We can throw away all data
+ * before the oldest XMIN of any running transaction. No future
+ * transaction will attempt to reference any pg_subtrans entry older
+ * than that (see Asserts in subtrans.c). During recovery, though, we
+ * mustn't do this because StartupSUBTRANS hasn't been called yet.
*/
if (!InRecovery)
TruncateSUBTRANS(GetOldestXmin(true));
ShmemVariableCache->nextXid = checkPoint.nextXid;
ShmemVariableCache->nextOid = checkPoint.nextOid;
ShmemVariableCache->oidCount = 0;
+
/*
- * TLI may change in a shutdown checkpoint, but it shouldn't decrease
+ * TLI may change in a shutdown checkpoint, but it shouldn't
+ * decrease
*/
if (checkPoint.ThisTimeLineID != ThisTimeLineID)
{
(int) checkPoint.ThisTimeLineID))
ereport(PANIC,
(errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
- checkPoint.ThisTimeLineID, ThisTimeLineID)));
+ checkPoint.ThisTimeLineID, ThisTimeLineID)));
/* Following WAL records should be run with new TLI */
ThisTimeLineID = checkPoint.ThisTimeLineID;
}
sprintf(buf + strlen(buf), ": %s",
RmgrTable[record->xl_rmid].rm_name);
}
-
-#endif /* WAL_DEBUG */
+#endif /* WAL_DEBUG */
/*
char *backupidstr;
XLogRecPtr checkpointloc;
XLogRecPtr startpoint;
- time_t stamp_time;
+ time_t stamp_time;
char strfbuf[128];
char labelfilepath[MAXPGPATH];
char xlogfilename[MAXFNAMELEN];
struct stat stat_buf;
FILE *fp;
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to run a backup"))));
backupidstr = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(backupid)));
+ PointerGetDatum(backupid)));
+
/*
- * Force a CHECKPOINT. This is not strictly necessary, but it seems
- * like a good idea to minimize the amount of past WAL needed to use the
- * backup. Also, this guarantees that two successive backup runs
- * will have different checkpoint positions and hence different history
- * file names, even if nothing happened in between.
+ * Force a CHECKPOINT. This is not strictly necessary, but it seems
+ * like a good idea to minimize the amount of past WAL needed to use
+ * the backup. Also, this guarantees that two successive backup runs
+ * will have different checkpoint positions and hence different
+ * history file names, even if nothing happened in between.
*/
RequestCheckpoint(true);
+
/*
* Now we need to fetch the checkpoint record location, and also its
- * REDO pointer. The oldest point in WAL that would be needed to restore
- * starting from the checkpoint is precisely the REDO pointer.
+ * REDO pointer. The oldest point in WAL that would be needed to
+ * restore starting from the checkpoint is precisely the REDO pointer.
*/
LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
checkpointloc = ControlFile->checkPoint;
XLByteToSeg(startpoint, _logId, _logSeg);
XLogFileName(xlogfilename, ThisTimeLineID, _logId, _logSeg);
+
/*
- * We deliberately use strftime/localtime not the src/timezone functions,
- * so that backup labels will consistently be recorded in the same
- * timezone regardless of TimeZone setting. This matches elog.c's
- * practice.
+ * We deliberately use strftime/localtime not the src/timezone
+ * functions, so that backup labels will consistently be recorded in
+ * the same timezone regardless of TimeZone setting. This matches
+ * elog.c's practice.
*/
stamp_time = time(NULL);
strftime(strfbuf, sizeof(strfbuf),
"%Y-%m-%d %H:%M:%S %Z",
localtime(&stamp_time));
+
/*
- * Check for existing backup label --- implies a backup is already running
+ * Check for existing backup label --- implies a backup is already
+ * running
*/
snprintf(labelfilepath, MAXPGPATH, "%s/backup_label", DataDir);
if (stat(labelfilepath, &stat_buf) != 0)
errmsg("a backup is already in progress"),
errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
labelfilepath)));
+
/*
* Okay, write the file
*/
(errcode_for_file_access(),
errmsg("could not write file \"%s\": %m",
labelfilepath)));
+
/*
* We're done. As a convenience, return the starting WAL offset.
*/
snprintf(xlogfilename, sizeof(xlogfilename), "%X/%X",
startpoint.xlogid, startpoint.xrecoff);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(xlogfilename)));
+ CStringGetDatum(xlogfilename)));
PG_RETURN_TEXT_P(result);
}
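
The comment in pg_start_backup above explains that the label timestamp deliberately goes through strftime()/localtime() so that backup labels always carry the C library's notion of the server timezone, independent of the TimeZone setting. A minimal standalone sketch of just that timestamping step; the "START TIME" label line and the surrounding file handling are illustrative here, not the backend's exact output:

#include <stdio.h>
#include <time.h>

int
main(void)
{
	char		strfbuf[128];
	time_t		stamp_time = time(NULL);

	/* same format string the backend uses when writing backup_label */
	strftime(strfbuf, sizeof(strfbuf),
			 "%Y-%m-%d %H:%M:%S %Z",
			 localtime(&stamp_time));

	/* a real backup_label also records WAL positions and the label text */
	printf("START TIME: %s\n", strfbuf);
	return 0;
}
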
XLogCtlInsert *Insert = &XLogCtl->Insert;
XLogRecPtr startpoint;
XLogRecPtr stoppoint;
- time_t stamp_time;
+ time_t stamp_time;
char strfbuf[128];
char labelfilepath[MAXPGPATH];
char histfilepath[MAXPGPATH];
char ch;
int ich;
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to run a backup"))));
+
/*
* Get the current end-of-WAL position; it will be unsafe to use this
* dump to restore to a point in advance of this time.
XLByteToSeg(stoppoint, _logId, _logSeg);
XLogFileName(stopxlogfilename, ThisTimeLineID, _logId, _logSeg);
+
/*
- * We deliberately use strftime/localtime not the src/timezone functions,
- * so that backup labels will consistently be recorded in the same
- * timezone regardless of TimeZone setting. This matches elog.c's
- * practice.
+ * We deliberately use strftime/localtime not the src/timezone
+ * functions, so that backup labels will consistently be recorded in
+ * the same timezone regardless of TimeZone setting. This matches
+ * elog.c's practice.
*/
stamp_time = time(NULL);
strftime(strfbuf, sizeof(strfbuf),
"%Y-%m-%d %H:%M:%S %Z",
localtime(&stamp_time));
+
/*
* Open the existing label file
*/
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("a backup is not in progress")));
}
+
/*
* Read and parse the START WAL LOCATION line (this code is pretty
- * crude, but we are not expecting any variability in the file format).
+ * crude, but we are not expecting any variability in the file
+ * format).
*/
if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %24s)%c",
&startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("invalid data in file \"%s\"", labelfilepath)));
+
/*
* Write the backup history file
*/
(errcode_for_file_access(),
errmsg("could not write file \"%s\": %m",
histfilepath)));
+
/*
* Close and remove the backup label file
*/
(errcode_for_file_access(),
errmsg("could not remove file \"%s\": %m",
labelfilepath)));
+
/*
* Notify archiver that history file may be archived immediately
*/
startpoint.xrecoff % XLogSegSize);
XLogArchiveNotify(histfilepath);
}
+
/*
* We're done. As a convenience, return the ending WAL offset.
*/
snprintf(stopxlogfilename, sizeof(stopxlogfilename), "%X/%X",
stoppoint.xlogid, stoppoint.xrecoff);
result = DatumGetTextP(DirectFunctionCall1(textin,
- CStringGetDatum(stopxlogfilename)));
+ CStringGetDatum(stopxlogfilename)));
PG_RETURN_TEXT_P(result);
}
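
pg_stop_backup above re-reads the label file with a deliberately rigid fscanf() format, on the assumption that only the backend ever writes that file. A minimal sketch of the same parsing approach applied to an in-memory line with sscanf(); the WAL location and segment file name values are made up for illustration:

#include <stdio.h>

int
main(void)
{
	const char *line =
		"START WAL LOCATION: 0/A4C52E8 (file 000000010000000000000000)\n";
	unsigned int xlogid;
	unsigned int xrecoff;
	char		fname[25];		/* 24-character segment name plus NUL */
	char		ch;

	if (sscanf(line, "START WAL LOCATION: %X/%X (file %24s)%c",
			   &xlogid, &xrecoff, fname, &ch) != 4 || ch != '\n')
	{
		fprintf(stderr, "invalid data in label line\n");
		return 1;
	}
	printf("start point %X/%X in segment %s\n", xlogid, xrecoff, fname);
	return 0;
}
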
*
* If we see a backup_label during recovery, we assume that we are recovering
* from a backup dump file, and we therefore roll forward from the checkpoint
- * identified by the label file, NOT what pg_control says. This avoids the
+ * identified by the label file, NOT what pg_control says. This avoids the
* problem that pg_control might have been archived one or more checkpoints
* later than the start of the dump, and so if we rely on it as the start
* point, we will fail to restore a consistent database state.
labelfilepath)));
return false; /* it's not there, all is fine */
}
+
/*
- * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code
- * is pretty crude, but we are not expecting any variability in the file
- * format).
+ * Read and parse the START WAL LOCATION and CHECKPOINT lines (this
+ * code is pretty crude, but we are not expecting any variability in
+ * the file format).
*/
if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c",
&startpoint.xlogid, &startpoint.xrecoff, &tli,
(errcode_for_file_access(),
errmsg("could not read file \"%s\": %m",
labelfilepath)));
+
/*
* Try to retrieve the backup history file (no error if we can't)
*/
BackupHistoryFilePath(histfilepath, tli, _logId, _logSeg,
startpoint.xrecoff % XLogSegSize);
- fp = AllocateFile(histfilepath, "r");
+ fp = AllocateFile(histfilepath, "r");
if (fp)
{
/*
* Parse history file to identify stop point.
*/
if (fscanf(fp, "START WAL LOCATION: %X/%X (file %24s)%c",
- &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
+ &startpoint.xlogid, &startpoint.xrecoff, startxlogfilename,
&ch) != 4 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", histfilename)));
+ errmsg("invalid data in file \"%s\"", histfilename)));
if (fscanf(fp, "STOP WAL LOCATION: %X/%X (file %24s)%c",
- &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename,
+ &stoppoint.xlogid, &stoppoint.xrecoff, stopxlogfilename,
&ch) != 4 || ch != '\n')
ereport(FATAL,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid data in file \"%s\"", histfilename)));
+ errmsg("invalid data in file \"%s\"", histfilename)));
recoveryMinXlogOffset = stoppoint;
if (ferror(fp) || FreeFile(fp))
ereport(FATAL,
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.33 2004/08/29 04:12:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/access/transam/xlogutils.c,v 1.34 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
res->reldata.rd_node = rnode;
/*
- * We set up the lockRelId in case anything tries to lock the dummy
- * relation. Note that this is fairly bogus since relNode may be
- * different from the relation's OID. It shouldn't really matter
- * though, since we are presumably running by ourselves and can't
- * have any lock conflicts ...
+ * We set up the lockRelId in case anything tries to lock the
+ * dummy relation. Note that this is fairly bogus since relNode
+ * may be different from the relation's OID. It shouldn't really
+ * matter though, since we are presumably running by ourselves and
+ * can't have any lock conflicts ...
*/
res->reldata.rd_lockInfo.lockRelId.dbId = rnode.dbNode;
res->reldata.rd_lockInfo.lockRelId.relId = rnode.relNode;
res->reldata.rd_targblock = InvalidBlockNumber;
res->reldata.rd_smgr = smgropen(res->reldata.rd_node);
+
/*
* Create the target file if it doesn't already exist. This lets
* us cope if the replay sequence contains writes to a relation
* that is later deleted. (The original coding of this routine
* would instead return NULL, causing the writes to be suppressed.
- * But that seems like it risks losing valuable data if the filesystem
- * loses an inode during a crash. Better to write the data until we
- * are actually told to delete the file.)
+ * But that seems like it risks losing valuable data if the
+ * filesystem loses an inode during a crash. Better to write the
+ * data until we are actually told to delete the file.)
*/
smgrcreate(res->reldata.rd_smgr, res->reldata.rd_istemp, true);
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.192 2004/08/29 04:12:25 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.193 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static const struct typinfo TypInfo[] = {
{"bool", BOOLOID, 0, 1, true, 'c', 'p',
- F_BOOLIN, F_BOOLOUT},
+ F_BOOLIN, F_BOOLOUT},
{"bytea", BYTEAOID, 0, -1, false, 'i', 'x',
- F_BYTEAIN, F_BYTEAOUT},
+ F_BYTEAIN, F_BYTEAOUT},
{"char", CHAROID, 0, 1, true, 'c', 'p',
- F_CHARIN, F_CHAROUT},
+ F_CHARIN, F_CHAROUT},
{"name", NAMEOID, CHAROID, NAMEDATALEN, false, 'i', 'p',
- F_NAMEIN, F_NAMEOUT},
+ F_NAMEIN, F_NAMEOUT},
{"int2", INT2OID, 0, 2, true, 's', 'p',
- F_INT2IN, F_INT2OUT},
+ F_INT2IN, F_INT2OUT},
{"int4", INT4OID, 0, 4, true, 'i', 'p',
- F_INT4IN, F_INT4OUT},
+ F_INT4IN, F_INT4OUT},
{"regproc", REGPROCOID, 0, 4, true, 'i', 'p',
- F_REGPROCIN, F_REGPROCOUT},
+ F_REGPROCIN, F_REGPROCOUT},
{"regclass", REGCLASSOID, 0, 4, true, 'i', 'p',
- F_REGCLASSIN, F_REGCLASSOUT},
+ F_REGCLASSIN, F_REGCLASSOUT},
{"regtype", REGTYPEOID, 0, 4, true, 'i', 'p',
- F_REGTYPEIN, F_REGTYPEOUT},
+ F_REGTYPEIN, F_REGTYPEOUT},
{"text", TEXTOID, 0, -1, false, 'i', 'x',
- F_TEXTIN, F_TEXTOUT},
+ F_TEXTIN, F_TEXTOUT},
{"oid", OIDOID, 0, 4, true, 'i', 'p',
- F_OIDIN, F_OIDOUT},
+ F_OIDIN, F_OIDOUT},
{"tid", TIDOID, 0, 6, false, 's', 'p',
- F_TIDIN, F_TIDOUT},
+ F_TIDIN, F_TIDOUT},
{"xid", XIDOID, 0, 4, true, 'i', 'p',
- F_XIDIN, F_XIDOUT},
+ F_XIDIN, F_XIDOUT},
{"cid", CIDOID, 0, 4, true, 'i', 'p',
- F_CIDIN, F_CIDOUT},
+ F_CIDIN, F_CIDOUT},
{"int2vector", INT2VECTOROID, INT2OID, INDEX_MAX_KEYS * 2, false, 's', 'p',
- F_INT2VECTORIN, F_INT2VECTOROUT},
+ F_INT2VECTORIN, F_INT2VECTOROUT},
{"oidvector", OIDVECTOROID, OIDOID, INDEX_MAX_KEYS * 4, false, 'i', 'p',
- F_OIDVECTORIN, F_OIDVECTOROUT},
+ F_OIDVECTORIN, F_OIDVECTOROUT},
{"_int4", INT4ARRAYOID, INT4OID, -1, false, 'i', 'x',
- F_ARRAY_IN, F_ARRAY_OUT},
+ F_ARRAY_IN, F_ARRAY_OUT},
{"_text", 1009, TEXTOID, -1, false, 'i', 'x',
- F_ARRAY_IN, F_ARRAY_OUT},
+ F_ARRAY_IN, F_ARRAY_OUT},
{"_aclitem", 1034, ACLITEMOID, -1, false, 'i', 'x',
- F_ARRAY_IN, F_ARRAY_OUT}
+ F_ARRAY_IN, F_ARRAY_OUT}
};
-static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo);
+static const int n_types = sizeof(TypInfo) / sizeof(struct typinfo);
struct typmap
{ /* a hack */
usage(void)
{
write_stderr("Usage:\n"
- " postgres -boot [OPTION]... DBNAME\n"
- " -c NAME=VALUE set run-time parameter\n"
- " -d 1-5 debug level\n"
- " -D datadir data directory\n"
- " -F turn off fsync\n"
- " -o file send debug output to file\n"
- " -x num internal use\n");
+ " postgres -boot [OPTION]... DBNAME\n"
+ " -c NAME=VALUE set run-time parameter\n"
+ " -d 1-5 debug level\n"
+ " -D datadir data directory\n"
+ " -F turn off fsync\n"
+ " -o file send debug output to file\n"
+ " -x num internal use\n");
proc_exit(1);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.106 2004/08/29 04:12:26 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.107 2004/08/29 05:06:41 momjian Exp $
*
* NOTES
* See acl.h.
* Determine the effective grantor ID for a GRANT or REVOKE operation.
*
* Ordinarily this is just the current user, but when a superuser does
- * GRANT or REVOKE, we pretend he is the object owner. This ensures that
+ * GRANT or REVOKE, we pretend he is the object owner. This ensures that
* all granted privileges appear to flow from the object owner, and there
* are never multiple "original sources" of a privilege.
*/
foreach(j, grantees)
{
PrivGrantee *grantee = (PrivGrantee *) lfirst(j);
- AclItem aclitem;
+ AclItem aclitem;
uint32 idtype;
Acl *newer_acl;
if (grantee->username)
{
- aclitem.ai_grantee = get_usesysid(grantee->username);
+ aclitem.ai_grantee = get_usesysid(grantee->username);
idtype = ACL_IDTYPE_UID;
}
else if (grantee->groupname)
{
- aclitem.ai_grantee = get_grosysid(grantee->groupname);
+ aclitem.ai_grantee = get_grosysid(grantee->groupname);
idtype = ACL_IDTYPE_GID;
}
else
{
- aclitem.ai_grantee = ACL_ID_WORLD;
+ aclitem.ai_grantee = ACL_ID_WORLD;
idtype = ACL_IDTYPE_WORLD;
}
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
errmsg("grant options can only be granted to individual users")));
- aclitem.ai_grantor = grantor_uid;
+ aclitem.ai_grantor = grantor_uid;
/*
* The asymmetry in the conditions here comes from the spec. In
- * GRANT, the grant_option flag signals WITH GRANT OPTION, which means
- * to grant both the basic privilege and its grant option. But in
- * REVOKE, plain revoke revokes both the basic privilege and its
- * grant option, while REVOKE GRANT OPTION revokes only the option.
+ * GRANT, the grant_option flag signals WITH GRANT OPTION, which
+ * means to grant both the basic privilege and its grant option.
+ * But in REVOKE, plain revoke revokes both the basic privilege
+ * and its grant option, while REVOKE GRANT OPTION revokes only
+ * the option.
*/
ACLITEM_SET_PRIVS_IDTYPE(aclitem,
- (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
- (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS,
+ (is_grant || !grant_option) ? privileges : ACL_NO_RIGHTS,
+ (!is_grant || grant_option) ? privileges : ACL_NO_RIGHTS,
idtype);
newer_acl = aclupdate(new_acl, &aclitem, modechg, owner_uid, behavior);
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("language \"%s\" is not trusted", langname),
- errhint("Only superusers may use untrusted languages.")));
+ errhint("Only superusers may use untrusted languages.")));
/*
* Note: for now, languages are treated as owned by the bootstrap
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
if (priv & ~((AclMode) ACL_ALL_RIGHTS_TABLESPACE))
ereport(ERROR,
(errcode(ERRCODE_INVALID_GRANT_OPERATION),
- errmsg("invalid privilege type %s for tablespace",
- privilege_to_string(priv))));
+ errmsg("invalid privilege type %s for tablespace",
+ privilege_to_string(priv))));
privileges |= priv;
}
}
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", spcname)));
+ errmsg("tablespace \"%s\" does not exist", spcname)));
pg_tablespace_tuple = (Form_pg_tablespace) GETSTRUCT(tuple);
ownerId = pg_tablespace_tuple->spcowner;
/*
* Restrict the operation to what we can actually grant or revoke,
- * and issue a warning if appropriate. (For REVOKE this isn't quite
- * what the spec says to do: the spec seems to want a warning only
- * if no privilege bits actually change in the ACL. In practice
- * that behavior seems much too noisy, as well as inconsistent with
- * the GRANT case.)
+ * and issue a warning if appropriate. (For REVOKE this isn't
+ * quite what the spec says to do: the spec seems to want a
+ * warning only if no privilege bits actually change in the ACL.
+ * In practice that behavior seems much too noisy, as well as
+ * inconsistent with the GRANT case.)
*/
this_privileges = privileges & my_goptions;
if (stmt->is_grant)
/*
* Deny anyone permission to update a system catalog unless
* pg_shadow.usecatupd is set. (This is to let superusers protect
- * themselves from themselves.) Also allow it if allowSystemTableMods.
+ * themselves from themselves.) Also allow it if
+ * allowSystemTableMods.
*
- * As of 7.4 we have some updatable system views; those shouldn't
- * be protected in this way. Assume the view rules can take care
- * of themselves.
+ * As of 7.4 we have some updatable system views; those shouldn't be
+ * protected in this way. Assume the view rules can take care of
+ * themselves.
*/
if ((mask & (ACL_INSERT | ACL_UPDATE | ACL_DELETE)) &&
IsSystemClass(classForm) &&
return mask;
/*
- * If we have been assigned this namespace as a temp namespace,
- * check to make sure we have CREATE TEMP permission on the database,
- * and if so act as though we have all standard (but not GRANT OPTION)
+ * If we have been assigned this namespace as a temp namespace, check
+ * to make sure we have CREATE TEMP permission on the database, and if
+ * so act as though we have all standard (but not GRANT OPTION)
* permissions on the namespace. If we don't have CREATE TEMP, act as
* though we have only USAGE (and not CREATE) rights.
*
- * This may seem redundant given the check in InitTempTableNamespace,
- * but it really isn't since current user ID may have changed since then.
+ * This may seem redundant given the check in InitTempTableNamespace, but
+ * it really isn't since current user ID may have changed since then.
* The upshot of this behavior is that a SECURITY DEFINER function can
- * create temp tables that can then be accessed (if permission is granted)
- * by code in the same session that doesn't have permissions to create
- * temp tables.
+ * create temp tables that can then be accessed (if permission is
+ * granted) by code in the same session that doesn't have permissions
+ * to create temp tables.
*
* XXX Would it be safe to ereport a special error message as
* InitTempTableNamespace does? Returning zero here means we'll get a
- * generic "permission denied for schema pg_temp_N" message, which is not
- * remarkably user-friendly.
+ * generic "permission denied for schema pg_temp_N" message, which is
+ * not remarkably user-friendly.
*/
if (isTempNamespace(nsp_oid))
{
AclId ownerId;
/*
- * Only shared relations can be stored in global space; don't let
- * even superusers override this
+ * Only shared relations can be stored in global space; don't let even
+ * superusers override this
*/
if (spc_oid == GLOBALTABLESPACE_OID && !IsBootstrapProcessingMode())
return 0;
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace with OID %u does not exist", spc_oid)));
+ errmsg("tablespace with OID %u does not exist", spc_oid)));
ownerId = ((Form_pg_tablespace) GETSTRUCT(tuple))->spcowner;
if (!HeapTupleIsValid(spctuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace with OID %u does not exist", spc_oid)));
+ errmsg("tablespace with OID %u does not exist", spc_oid)));
spcowner = ((Form_pg_tablespace) GETSTRUCT(spctuple))->spcowner;
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("conversion with OID %u does not exist", conv_oid)));
+ errmsg("conversion with OID %u does not exist", conv_oid)));
owner_id = ((Form_pg_conversion) GETSTRUCT(tuple))->conowner;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.38 2004/08/29 04:12:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.39 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (var->varno <= 0 || var->varno > list_length(rtable))
elog(ERROR, "invalid varno %d", var->varno);
rte = rt_fetch(var->varno, rtable);
+
/*
* A whole-row Var references no specific columns, so adds no new
* dependency.
var->varattno > list_length(rte->joinaliasvars))
elog(ERROR, "invalid varattno %d", var->varattno);
find_expr_references_walker((Node *) list_nth(rte->joinaliasvars,
- var->varattno - 1),
+ var->varattno - 1),
context);
list_free(context->rtables);
context->rtables = save_rtables;
getRelationDescription(&buffer, object->objectId);
if (object->objectSubId != 0)
appendStringInfo(&buffer, gettext(" column %s"),
- get_relid_attribute_name(object->objectId,
- object->objectSubId));
+ get_relid_attribute_name(object->objectId,
+ object->objectSubId));
break;
case OCLASS_PROC:
appendStringInfo(&buffer, gettext("operator class %s for %s"),
quote_qualified_identifier(nspname,
- NameStr(opcForm->opcname)),
+ NameStr(opcForm->opcname)),
NameStr(amForm->amname));
ReleaseSysCache(amTup);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.274 2004/08/29 04:12:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.275 2004/08/29 05:06:41 momjian Exp $
*
*
* INTERFACE ROUTINES
/*
* Never allow a pg_class entry to explicitly specify the database's
- * default tablespace in reltablespace; force it to zero instead.
- * This ensures that if the database is cloned with a different
- * default tablespace, the pg_class entry will still match where
- * CREATE DATABASE will put the physically copied relation.
+ * default tablespace in reltablespace; force it to zero instead. This
+ * ensures that if the database is cloned with a different default
+ * tablespace, the pg_class entry will still match where CREATE
+ * DATABASE will put the physically copied relation.
*
* Yes, this is a bit of a hack.
*/
nailme);
/*
- * have the storage manager create the relation's disk file, if needed.
+ * have the storage manager create the relation's disk file, if
+ * needed.
*/
if (create_storage)
{
/*
* Set the type OID to invalid. A dropped attribute's type link
- * cannot be relied on (once the attribute is dropped, the type might
- * be too). Fortunately we do not need the type row --- the only
- * really essential information is the type's typlen and typalign,
- * which are preserved in the attribute's attlen and attalign. We set
- * atttypid to zero here as a means of catching code that incorrectly
- * expects it to be valid.
+ * cannot be relied on (once the attribute is dropped, the type
+ * might be too). Fortunately we do not need the type row --- the
+ * only really essential information is the type's typlen and
+ * typalign, which are preserved in the attribute's attlen and
+ * attalign. We set atttypid to zero here as a means of catching
+ * code that incorrectly expects it to be valid.
*/
attStruct->atttypid = InvalidOid;
/* We don't want to keep stats for it anymore */
attStruct->attstattarget = 0;
- /* Change the column name to something that isn't likely to conflict */
+ /*
+ * Change the column name to something that isn't likely to
+ * conflict
+ */
snprintf(newattname, sizeof(newattname),
"........pg.dropped.%d........", attnum);
namestrcpy(&(attStruct->attname), newattname);
/*
* Flush the relation from the relcache. We want to do this before
* starting to remove catalog entries, just to be certain that no
- * relcache entry rebuild will happen partway through. (That should
+ * relcache entry rebuild will happen partway through. (That should
* not really matter, since we don't do CommandCounterIncrement here,
* but let's be safe.)
*/
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in check constraint")));
+ errmsg("cannot use subquery in check constraint")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in check constraint")));
+ errmsg("cannot use aggregate function in check constraint")));
/*
* Check name uniqueness, or generate a name if none was given.
if (strcmp((char *) lfirst(cell2), ccname) == 0)
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("check constraint \"%s\" already exists",
- ccname)));
+ errmsg("check constraint \"%s\" already exists",
+ ccname)));
}
}
else
/*
* When generating a name, we want to create "tab_col_check"
* for a column constraint and "tab_check" for a table
- * constraint. We no longer have any info about the
- * syntactic positioning of the constraint phrase, so we
- * approximate this by seeing whether the expression references
- * more than one column. (If the user played by the rules,
- * the result is the same...)
+ * constraint. We no longer have any info about the syntactic
+ * positioning of the constraint phrase, so we approximate
+ * this by seeing whether the expression references more than
+ * one column. (If the user played by the rules, the result
+ * is the same...)
*
- * Note: pull_var_clause() doesn't descend into sublinks,
- * but we eliminated those above; and anyway this only needs
- * to be an approximate answer.
+ * Note: pull_var_clause() doesn't descend into sublinks, but we
+ * eliminated those above; and anyway this only needs to be an
+ * approximate answer.
*/
- List *vars;
- char *colname;
+ List *vars;
+ char *colname;
vars = pull_var_clause(expr, false);
if (contain_var_clause(expr))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("cannot use column references in default expression")));
+ errmsg("cannot use column references in default expression")));
/*
* It can't return a set either.
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in default expression")));
+ errmsg("cannot use aggregate function in default expression")));
/*
* Coerce the expression to the correct type and typmod, if given.
return;
/*
- * Otherwise, must scan pg_constraint. Right now, this is a seqscan
+ * Otherwise, must scan pg_constraint. Right now, this is a seqscan
* because there is no available index on confrelid.
*/
fkeyRel = heap_openr(ConstraintRelationName, AccessShareLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.237 2004/08/29 04:12:27 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.238 2004/08/29 05:06:41 momjian Exp $
*
*
* INTERFACE ROUTINES
* We cannot allow indexing a shared relation after initdb (because
* there's no way to make the entry in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting of shared rels happens after the bootstrap
- * phase, so checking IsBootstrapProcessingMode() won't work). However,
- * we can at least prevent this mistake under normal multi-user operation.
+ * standalone backend (toasting of shared rels happens after the
+ * bootstrap phase, so checking IsBootstrapProcessingMode() won't
+ * work). However, we can at least prevent this mistake under normal
+ * multi-user operation.
*/
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
/*
* Close and flush the index's relcache entry, to ensure relcache
- * doesn't try to rebuild it while we're deleting catalog entries.
- * We keep the lock though.
+ * doesn't try to rebuild it while we're deleting catalog entries. We
+ * keep the lock though.
*/
index_close(userIndexRelation);
heap_close(indexRelation, RowExclusiveLock);
/*
- * if it has any expression columns, we might have stored
- * statistics about them.
+ * if it has any expression columns, we might have stored statistics
+ * about them.
*/
if (hasexprs)
RemoveStatistics(indexId, 0);
/*
* Find the tuple to update in pg_class. In bootstrap mode we can't
- * use heap_update, so cheat and overwrite the tuple in-place. In
+ * use heap_update, so cheat and overwrite the tuple in-place. In
* normal processing, make a copy to scribble on.
*/
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
newrelfilenode = newoid();
/*
- * Find the pg_class tuple for the given relation. This is not used
+ * Find the pg_class tuple for the given relation. This is not used
* during bootstrap, so okay to use heap_update always.
*/
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(relation)),
+ ObjectIdGetDatum(RelationGetRelid(relation)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "could not find tuple for relation %u",
/*
* Find the tuple to update in pg_class. Normally we make a copy of
- * the tuple using the syscache, modify it, and apply heap_update.
- * But in bootstrap mode we can't use heap_update, so we cheat and
+ * the tuple using the syscache, modify it, and apply heap_update. But
+ * in bootstrap mode we can't use heap_update, so we cheat and
* overwrite the tuple in-place.
*
- * We also must cheat if reindexing pg_class itself, because the
- * target index may presently not be part of the set of indexes that
+ * We also must cheat if reindexing pg_class itself, because the target
+ * index may presently not be part of the set of indexes that
* CatalogUpdateIndexes would update (see reindex_relation). In this
* case the stats updates will not be WAL-logged and so could be lost
- * in a crash. This seems OK considering VACUUM does the same thing.
+ * in a crash. This seems OK considering VACUUM does the same thing.
*/
pg_class = heap_openr(RelationRelationName, RowExclusiveLock);
scan = heap_beginscan(heapRelation, /* relation */
snapshot, /* seeself */
0, /* number of keys */
- NULL); /* scan key */
+ NULL); /* scan key */
reltuples = 0;
* system catalogs before committing.
*/
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmin(heapTuple->t_data))
+ HeapTupleHeaderGetXmin(heapTuple->t_data))
&& !IsSystemRelation(heapRelation))
elog(ERROR, "concurrent insert in progress");
indexIt = true;
* system catalogs before committing.
*/
if (!TransactionIdIsCurrentTransactionId(
- HeapTupleHeaderGetXmax(heapTuple->t_data))
+ HeapTupleHeaderGetXmax(heapTuple->t_data))
&& !IsSystemRelation(heapRelation))
elog(ERROR, "concurrent delete in progress");
indexIt = true;
* Note: for REINDEX INDEX, doing this before opening the parent heap
* relation means there's a possibility for deadlock failure against
* another xact that is doing normal accesses to the heap and index.
- * However, it's not real clear why you'd be wanting to do REINDEX INDEX
- * on a table that's in active use, so I'd rather have the protection of
- * making sure the index is locked down. In the REINDEX TABLE and
- * REINDEX DATABASE cases, there is no problem because caller already
- * holds exclusive lock on the parent table.
+ * However, it's not real clear why you'd be wanting to do REINDEX
+ * INDEX on a table that's in active use, so I'd rather have the
+ * protection of making sure the index is locked down. In the REINDEX
+ * TABLE and REINDEX DATABASE cases, there is no problem because
+ * caller already holds exclusive lock on the parent table.
*/
iRel = index_open(indexId);
LockRelation(iRel, AccessExclusiveLock);
* we can do it the normal transaction-safe way.
*
* Since inplace processing isn't crash-safe, we only allow it in a
- * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE cases,
- * the caller should have detected this.)
+ * standalone backend. (In the REINDEX TABLE and REINDEX DATABASE
+ * cases, the caller should have detected this.)
*/
inplace = iRel->rd_rel->relisshared;
{
/*
* Release any buffers associated with this index. If they're
- * dirty, they're just dropped without bothering to flush to disk.
+ * dirty, they're just dropped without bothering to flush to
+ * disk.
*/
DropRelationBuffers(iRel);
index_build(heapRelation, iRel, indexInfo);
/*
- * index_build will close both the heap and index relations (but not
- * give up the locks we hold on them). So we're done.
+ * index_build will close both the heap and index relations (but
+ * not give up the locks we hold on them). So we're done.
*/
}
PG_CATCH();
/*
* reindex_index will attempt to update the pg_class rows for the
- * relation and index. If we are processing pg_class itself, we
- * want to make sure that the updates do not try to insert index
- * entries into indexes we have not processed yet. (When we are
- * trying to recover from corrupted indexes, that could easily
- * cause a crash.) We can accomplish this because CatalogUpdateIndexes
- * will use the relcache's index list to know which indexes to update.
- * We just force the index list to be only the stuff we've processed.
+ * relation and index. If we are processing pg_class itself, we want
+ * to make sure that the updates do not try to insert index entries
+ * into indexes we have not processed yet. (When we are trying to
+ * recover from corrupted indexes, that could easily cause a crash.)
+ * We can accomplish this because CatalogUpdateIndexes will use the
+ * relcache's index list to know which indexes to update. We just
+ * force the index list to be only the stuff we've processed.
*
* It is okay to not insert entries into the indexes we have not
* processed yet because all of this is transaction-safe. If we fail
/* Reindex all the indexes. */
foreach(indexId, indexIds)
{
- Oid indexOid = lfirst_oid(indexId);
+ Oid indexOid = lfirst_oid(indexId);
if (is_pg_class)
RelationSetIndexList(rel, doneIndexes);
result = (indexIds != NIL);
/*
- * If the relation has a secondary toast rel, reindex that too while we
- * still hold the lock on the master table.
+ * If the relation has a secondary toast rel, reindex that too while
+ * we still hold the lock on the master table.
*/
if (toast_too && OidIsValid(toast_relid))
result |= reindex_relation(toast_relid, false);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.69 2004/08/29 04:12:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.70 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (strcmp(relation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- relation->catalogname, relation->schemaname,
- relation->relname)));
+ errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
+ relation->catalogname, relation->schemaname,
+ relation->relname)));
}
if (relation->schemaname)
if (strcmp(newRelation->catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
- newRelation->catalogname, newRelation->schemaname,
- newRelation->relname)));
+ errmsg("cross-database references are not implemented: \"%s.%s.%s\"",
+ newRelation->catalogname, newRelation->schemaname,
+ newRelation->relname)));
}
if (newRelation->istemp)
if (newRelation->schemaname)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("temporary tables may not specify a schema name")));
+ errmsg("temporary tables may not specify a schema name")));
/* Initialize temp namespace if first time through */
if (!OidIsValid(myTempNamespace))
InitTempTableNamespace();
/*
* In typical scenarios, most if not all of the operators found by the
- * catcache search will end up getting returned; and there can be quite
- * a few, for common operator names such as '=' or '+'. To reduce the
- * time spent in palloc, we allocate the result space as an array large
- * enough to hold all the operators. The original coding of this routine
- * did a separate palloc for each operator, but profiling revealed that
- * the pallocs used an unreasonably large fraction of parsing time.
+ * catcache search will end up getting returned; and there can be
+ * quite a few, for common operator names such as '=' or '+'. To
+ * reduce the time spent in palloc, we allocate the result space as an
+ * array large enough to hold all the operators. The original coding
+ * of this routine did a separate palloc for each operator, but
+ * profiling revealed that the pallocs used an unreasonably large
+ * fraction of parsing time.
*/
#define SPACE_PER_OP MAXALIGN(sizeof(struct _FuncCandidateList) + sizeof(Oid))
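
The reflowed comment above describes replacing one palloc per operator candidate with a single allocation sized for the whole catcache result, then handing entries out of that block. A minimal sketch of the allocate-once pattern with malloc() standing in for palloc(); the Candidate struct here is invented for illustration and is simpler than the real variable-length _FuncCandidateList entries (hence SPACE_PER_OP and MAXALIGN in the backend):

#include <stdio.h>
#include <stdlib.h>

/* illustrative fixed-size entry; not the backend's struct */
typedef struct Candidate
{
	struct Candidate *next;
	unsigned int oid;
} Candidate;

int
main(void)
{
	int			ncandidates = 8;	/* known upper bound, like the catcache list length */
	Candidate  *space;
	Candidate  *head = NULL;
	int			i;

	/* one allocation covering every possible entry, instead of one per entry */
	space = malloc(sizeof(Candidate) * ncandidates);
	if (space == NULL)
		return 1;

	for (i = 0; i < ncandidates; i++)
	{
		Candidate  *cand = &space[i];	/* carve the next slot out of the block */

		cand->oid = 16384 + i;	/* stand-in for an operator OID */
		cand->next = head;
		head = cand;
	}

	for (; head != NULL; head = head->next)
		printf("candidate oid %u\n", head->oid);

	free(space);
	return 0;
}
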
if (strcmp(catalogname, get_database_name(MyDatabaseId)) != 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented: %s",
- NameListToString(names))));
+ errmsg("cross-database references are not implemented: %s",
+ NameListToString(names))));
break;
default:
ereport(ERROR,
* tables. We use a nonstandard error message here since
* "databasename: permission denied" might be a tad cryptic.
*
- * Note that ACL_CREATE_TEMP rights are rechecked in pg_namespace_aclmask;
- * that's necessary since current user ID could change during the session.
- * But there's no need to make the namespace in the first place until a
- * temp table creation request is made by someone with appropriate rights.
+ * Note that ACL_CREATE_TEMP rights are rechecked in
+ * pg_namespace_aclmask; that's necessary since current user ID could
+ * change during the session. But there's no need to make the
+ * namespace in the first place until a temp table creation request is
+ * made by someone with appropriate rights.
*/
if (pg_database_aclcheck(MyDatabaseId, GetUserId(),
ACL_CREATE_TEMP) != ACLCHECK_OK)
* ALTER DATABASE SET or ALTER USER SET command. It could be that
* the intended use of the search path is for some other database,
* so we should not error out if it mentions schemas not present
- * in the current database. We reduce the message to NOTICE instead.
+ * in the current database. We reduce the message to NOTICE
+ * instead.
*/
foreach(l, namelist)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.67 2004/08/29 04:12:28 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.68 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot determine transition data type"),
- errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
- "transition type must have one of them as its base type.")));
+ errdetail("An aggregate using \"anyarray\" or \"anyelement\" as "
+ "transition type must have one of them as its base type.")));
/* handle transfn */
MemSet(fnArgs, 0, FUNC_MAX_ARGS * sizeof(Oid));
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot determine result data type"),
- errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
- "must have one of them as its base type.")));
+ errdetail("An aggregate returning \"anyarray\" or \"anyelement\" "
+ "must have one of them as its base type.")));
/*
* Everything looks okay. Try to create the pg_proc entry for the
PROVOLATILE_IMMUTABLE, /* volatility (not
* needed for agg) */
1, /* parameterCount */
- fnArgs, /* parameterTypes */
- NULL); /* parameterNames */
+ fnArgs, /* parameterTypes */
+ NULL); /* parameterNames */
/*
* Okay to create the pg_aggregate entry.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.118 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.119 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Datum fmgr_sql_validator(PG_FUNCTION_ARGS);
static Datum create_parameternames_array(int parameterCount,
- const char *parameterNames[]);
+ const char *parameterNames[]);
static void sql_function_parse_error_callback(void *arg);
-static int match_prosrc_to_query(const char *prosrc, const char *queryText,
- int cursorpos);
+static int match_prosrc_to_query(const char *prosrc, const char *queryText,
+ int cursorpos);
static bool match_prosrc_to_literal(const char *prosrc, const char *literal,
- int cursorpos, int *newcursorpos);
+ int cursorpos, int *newcursorpos);
/* ----------------------------------------------------------------
values[i++] = UInt16GetDatum(parameterCount); /* pronargs */
values[i++] = ObjectIdGetDatum(returnType); /* prorettype */
values[i++] = PointerGetDatum(typev); /* proargtypes */
- values[i++] = namesarray; /* proargnames */
+ values[i++] = namesarray; /* proargnames */
if (namesarray == PointerGetDatum(NULL))
nulls[Anum_pg_proc_proargnames - 1] = 'n';
values[i++] = DirectFunctionCall1(textin, /* prosrc */
if (!parameterNames)
return PointerGetDatum(NULL);
- for (i=0; i<parameterCount; i++)
+ for (i = 0; i < parameterCount; i++)
{
const char *s = parameterNames[i];
}
/*
- * Otherwise assume we are returning the whole tuple. Crosschecking
- * against what the caller expects will happen at runtime.
+ * Otherwise assume we are returning the whole tuple.
+ * Crosschecking against what the caller expects will happen at
+ * runtime.
*/
return true;
}
char *probin;
/*
- * It'd be most consistent to skip the check if !check_function_bodies,
- * but the purpose of that switch is to be helpful for pg_dump loading,
- * and for pg_dump loading it's much better if we *do* check.
+ * It'd be most consistent to skip the check if
+ * !check_function_bodies, but the purpose of that switch is to be
+ * helpful for pg_dump loading, and for pg_dump loading it's much
+ * better if we *do* check.
*/
tuple = SearchSysCache(PROCOID,
error_context_stack = &sqlerrcontext;
/*
- * We can't do full prechecking of the function definition if there
- * are any polymorphic input types, because actual datatypes of
- * expression results will be unresolvable. The check will be done
- * at runtime instead.
+ * We can't do full prechecking of the function definition if
+ * there are any polymorphic input types, because actual datatypes
+ * of expression results will be unresolvable. The check will be
+ * done at runtime instead.
*
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
/*
* Adjust a syntax error occurring inside the function body of a CREATE
* FUNCTION command. This can be used by any function validator, not only
- * for SQL-language functions. It is assumed that the syntax error position
+ * for SQL-language functions. It is assumed that the syntax error position
* is initially relative to the function body string (as passed in). If
* possible, we adjust the position to reference the original CREATE command;
* if we can't manage that, we set up an "internal query" syntax error instead.
const char *queryText;
/*
- * Nothing to do unless we are dealing with a syntax error that has
- * a cursor position.
+ * Nothing to do unless we are dealing with a syntax error that has a
+ * cursor position.
*
- * Some PLs may prefer to report the error position as an internal
- * error to begin with, so check that too.
+ * Some PLs may prefer to report the error position as an internal error
+ * to begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
* (though not in any very probable scenarios), so fail if we find
* more than one match.
*/
- int prosrclen = strlen(prosrc);
- int querylen = strlen(queryText);
- int matchpos = 0;
- int curpos;
- int newcursorpos;
+ int prosrclen = strlen(prosrc);
+ int querylen = strlen(queryText);
+ int matchpos = 0;
+ int curpos;
+ int newcursorpos;
- for (curpos = 0; curpos < querylen-prosrclen; curpos++)
+ for (curpos = 0; curpos < querylen - prosrclen; curpos++)
{
if (queryText[curpos] == '$' &&
- strncmp(prosrc, &queryText[curpos+1], prosrclen) == 0 &&
- queryText[curpos+1+prosrclen] == '$')
+ strncmp(prosrc, &queryText[curpos + 1], prosrclen) == 0 &&
+ queryText[curpos + 1 + prosrclen] == '$')
{
/*
* Found a $foo$ match. Since there are no embedded quoting
*/
if (matchpos)
return 0; /* multiple matches, fail */
- matchpos = pg_mbstrlen_with_len(queryText, curpos+1)
+ matchpos = pg_mbstrlen_with_len(queryText, curpos + 1)
+ cursorpos;
}
else if (queryText[curpos] == '\'' &&
- match_prosrc_to_literal(prosrc, &queryText[curpos+1],
+ match_prosrc_to_literal(prosrc, &queryText[curpos + 1],
cursorpos, &newcursorpos))
{
/*
- * Found a 'foo' match. match_prosrc_to_literal() has adjusted
- * for any quotes or backslashes embedded in the literal.
+ * Found a 'foo' match. match_prosrc_to_literal() has
+ * adjusted for any quotes or backslashes embedded in the
+ * literal.
*/
if (matchpos)
return 0; /* multiple matches, fail */
- matchpos = pg_mbstrlen_with_len(queryText, curpos+1)
+ matchpos = pg_mbstrlen_with_len(queryText, curpos + 1)
+ newcursorpos;
}
}
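
The loop above scans the original CREATE FUNCTION text for a dollar-quoted or single-quoted copy of the function body so that a syntax error position inside the body can be mapped back onto the full command. A minimal sketch of the simple $$body$$ case, assuming single-byte characters (the backend additionally converts byte offsets to character offsets with pg_mbstrlen_with_len and handles 'quoted' literals separately):

#include <stdio.h>
#include <string.h>

/*
 * Return the 1-based byte offset of the start of a $$-quoted copy of
 * "body" inside "query", or 0 if it is not found exactly once.
 */
static int
find_dollar_quoted_body(const char *query, const char *body)
{
	int			bodylen = strlen(body);
	int			querylen = strlen(query);
	int			matchpos = 0;
	int			curpos;

	for (curpos = 0; curpos < querylen - bodylen; curpos++)
	{
		if (query[curpos] == '$' &&
			strncmp(body, &query[curpos + 1], bodylen) == 0 &&
			query[curpos + 1 + bodylen] == '$')
		{
			if (matchpos)
				return 0;		/* ambiguous: body appears more than once */
			matchpos = curpos + 2;		/* first body character, 1-based */
		}
	}
	return matchpos;
}

int
main(void)
{
	const char *query =
		"CREATE FUNCTION one() RETURNS int AS $$select 1$$ LANGUAGE sql;";

	printf("body starts at offset %d\n",
		   find_dollar_quoted_body(query, "select 1"));
	return 0;
}
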
/*
* This implementation handles backslashes and doubled quotes in the
- * string literal. It does not handle the SQL syntax for literals
+ * string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
- * We do the comparison a character at a time, not a byte at a time,
- * so that we can do the correct cursorpos math.
+ * We do the comparison a character at a time, not a byte at a time, so
+ * that we can do the correct cursorpos math.
*/
while (*prosrc)
{
cursorpos--; /* characters left before cursor */
+
/*
* Check for backslashes and doubled quotes in the literal; adjust
* newcp when one is found before the cursor.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.95 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/catalog/pg_type.c,v 1.96 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(internalSize <= 0 || internalSize > (int16) sizeof(Datum)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("internal size %d is invalid for passed-by-value type",
- internalSize)));
+ errmsg("internal size %d is invalid for passed-by-value type",
+ internalSize)));
/* Only varlena types can be toasted */
if (storage != 'p' && internalSize != -1)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.20 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.21 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
/*
* if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a basetype
- * of ANYOID. This means that the aggregate applies to all basetypes
- * (eg, COUNT).
+ * that specific type; else attempt to find an aggregate with a
+ * basetype of ANYOID. This means that the aggregate applies to all
+ * basetypes (eg, COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
/*
* if a basetype is passed in, then attempt to find an aggregate for
- * that specific type; else attempt to find an aggregate with a basetype
- * of ANYOID. This means that the aggregate applies to all basetypes
- * (eg, COUNT).
+ * that specific type; else attempt to find an aggregate with a
+ * basetype of ANYOID. This means that the aggregate applies to all
+ * basetypes (eg, COUNT).
*/
if (basetype)
basetypeOid = typenameTypeId(basetype);
elog(ERROR, "cache lookup failed for function %u", procOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
procForm->proowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.10 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/alter.c,v 1.11 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
+ * Executes an ALTER OBJECT / RENAME TO statement. Based on the object
* type, the function appropriate to that type is executed.
*/
void
void
ExecAlterOwnerStmt(AlterOwnerStmt *stmt)
{
- AclId newowner = get_usesysid(stmt->newowner);
+ AclId newowner = get_usesysid(stmt->newowner);
switch (stmt->objectType)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.75 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.76 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Data structure for Algorithm S from Knuth 3.4.2 */
typedef struct
{
- BlockNumber N; /* number of blocks, known in advance */
+ BlockNumber N; /* number of blocks, known in advance */
int n; /* desired sample size */
- BlockNumber t; /* current block number */
+ BlockNumber t; /* current block number */
int m; /* blocks selected so far */
} BlockSamplerData;
typedef BlockSamplerData *BlockSampler;
static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
- int samplesize);
+ int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
static BlockNumber BlockSampler_Next(BlockSampler bs);
static void compute_index_stats(Relation onerel, double totalrows,
- AnlIndexData *indexdata, int nindexes,
- HeapTuple *rows, int numrows,
- MemoryContext col_context);
+ AnlIndexData *indexdata, int nindexes,
+ HeapTuple *rows, int numrows,
+ MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum);
static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
int targrows, double *totalrows);
}
/*
- * Check that it's a plain table; we used to do this in
- * get_rel_oids() but seems safer to check after we've locked the
- * relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids()
+ * but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
}
/*
- * Open all indexes of the relation, and see if there are any analyzable
- * columns in the indexes. We do not analyze index columns if there was
- * an explicit column list in the ANALYZE command, however.
+ * Open all indexes of the relation, and see if there are any
+ * analyzable columns in the indexes. We do not analyze index columns
+ * if there was an explicit column list in the ANALYZE command,
+ * however.
*/
vac_open_indexes(onerel, &nindexes, &Irel);
hasindex = (nindexes > 0);
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
- IndexInfo *indexInfo;
+ IndexInfo *indexInfo;
thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
- thisdata->tupleFract = 1.0; /* fix later if partial */
+ thisdata->tupleFract = 1.0; /* fix later if partial */
if (indexInfo->ii_Expressions != NIL && vacstmt->va_cols == NIL)
{
ListCell *indexpr_item = list_head(indexInfo->ii_Expressions);
/* Found an index expression */
Node *indexkey;
- if (indexpr_item == NULL) /* shouldn't happen */
+ if (indexpr_item == NULL) /* shouldn't happen */
elog(ERROR, "too few entries in indexprs list");
indexkey = (Node *) lfirst(indexpr_item);
indexpr_item = lnext(indexpr_item);
/*
- * Can't analyze if the opclass uses a storage type
- * different from the expression result type. We'd
- * get confused because the type shown in pg_attribute
- * for the index column doesn't match what we are
- * getting from the expression. Perhaps this can be
- * fixed someday, but for now, punt.
+ * Can't analyze if the opclass uses a storage
+ * type different from the expression result type.
+ * We'd get confused because the type shown in
+ * pg_attribute for the index column doesn't match
+ * what we are getting from the expression.
+ * Perhaps this can be fixed someday, but for now,
+ * punt.
*/
if (exprType(indexkey) !=
Irel[ind]->rd_att->attrs[i]->atttypid)
continue;
thisdata->vacattrstats[tcnt] =
- examine_attribute(Irel[ind], i+1);
+ examine_attribute(Irel[ind], i + 1);
if (thisdata->vacattrstats[tcnt] != NULL)
{
tcnt++;
/*
* If we are running a standalone ANALYZE, update pages/tuples stats
- * in pg_class. We know the accurate page count from the smgr,
- * but only an approximate number of tuples; therefore, if we are part
- * of VACUUM ANALYZE do *not* overwrite the accurate count already
- * inserted by VACUUM. The same consideration applies to indexes.
+ * in pg_class. We know the accurate page count from the smgr, but
+ * only an approximate number of tuples; therefore, if we are part of
+ * VACUUM ANALYZE do *not* overwrite the accurate count already
+ * inserted by VACUUM. The same consideration applies to indexes.
*/
if (!vacstmt->vacuum)
{
MemoryContext col_context)
{
MemoryContext ind_context,
- old_context;
+ old_context;
TupleDesc heapDescriptor;
Datum attdata[INDEX_MAX_KEYS];
char nulls[INDEX_MAX_KEYS];
for (ind = 0; ind < nindexes; ind++)
{
AnlIndexData *thisdata = &indexdata[ind];
- IndexInfo *indexInfo = thisdata->indexInfo;
+ IndexInfo *indexInfo = thisdata->indexInfo;
int attr_cnt = thisdata->attr_cnt;
TupleTable tupleTable;
TupleTableSlot *slot;
if (attr_cnt > 0)
{
/*
- * Evaluate the index row to compute expression values.
- * We could do this by hand, but FormIndexDatum is convenient.
+ * Evaluate the index row to compute expression values. We
+ * could do this by hand, but FormIndexDatum is
+ * convenient.
*/
FormIndexDatum(indexInfo,
heapTuple,
estate,
attdata,
nulls);
+
/*
* Save just the columns we care about.
*/
for (i = 0; i < attr_cnt; i++)
{
VacAttrStats *stats = thisdata->vacattrstats[i];
- int attnum = stats->attr->attnum;
+ int attnum = stats->attr->attnum;
- exprvals[tcnt] = attdata[attnum-1];
- exprnulls[tcnt] = (nulls[attnum-1] == 'n');
+ exprvals[tcnt] = attdata[attnum - 1];
+ exprnulls[tcnt] = (nulls[attnum - 1] == 'n');
tcnt++;
}
}
/*
* Having counted the number of rows that pass the predicate in
- * the sample, we can estimate the total number of rows in the index.
+ * the sample, we can estimate the total number of rows in the
+ * index.
*/
thisdata->tupleFract = (double) numindexrows / (double) numrows;
totalindexrows = ceil(thisdata->tupleFract * totalrows);
stats->tupattnum = attnum;
/*
- * Call the type-specific typanalyze function. If none is specified,
+ * Call the type-specific typanalyze function. If none is specified,
* use std_typanalyze().
*/
if (OidIsValid(stats->attrtype->typanalyze))
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize)
{
bs->N = nblocks; /* measured table size */
+
/*
- * If we decide to reduce samplesize for tables that have less or
- * not much more than samplesize blocks, here is the place to do
- * it.
+ * If we decide to reduce samplesize for tables that have less or not
+ * much more than samplesize blocks, here is the place to do it.
*/
bs->n = samplesize;
bs->t = 0; /* blocks scanned so far */
static BlockNumber
BlockSampler_Next(BlockSampler bs)
{
- BlockNumber K = bs->N - bs->t; /* remaining blocks */
+ BlockNumber K = bs->N - bs->t; /* remaining blocks */
int k = bs->n - bs->m; /* blocks still to sample */
- double p; /* probability to skip block */
- double V; /* random */
+ double p; /* probability to skip block */
+ double V; /* random */
Assert(BlockSampler_HasMore(bs)); /* hence K > 0 and k > 0 */
* If we are to skip, we should advance t (hence decrease K), and
* repeat the same probabilistic test for the next block. The naive
* implementation thus requires a random_fract() call for each block
- * number. But we can reduce this to one random_fract() call per
+ * number. But we can reduce this to one random_fract() call per
* selected block, by noting that each time the while-test succeeds,
* we can reinterpret V as a uniform random number in the range 0 to p.
* Therefore, instead of choosing a new V, we just adjust p to be
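
The reformatted comment above describes how BlockSampler_Next() gets by with one random draw per selected block: whenever the skip test succeeds, V is still uniform on [0, p), so only p needs updating. A standalone sketch of that idea, assuming a plain rand()-based random_fraction() in place of the backend's random_fract():

#include <stdio.h>
#include <stdlib.h>

static double
random_fraction(void)
{
    /* crude stand-in for random_fract(): uniform in [0, 1) */
    return (double) rand() / ((double) RAND_MAX + 1.0);
}

/*
 * Print a sorted random sample of "n" block numbers out of "N" using
 * Knuth's Algorithm S, spending one uniform draw per selected block:
 * while V < p we skip a block and fold the next block's conditional
 * skip probability into p instead of drawing a new V.
 */
static void
sample_blocks(unsigned N, int n)
{
    unsigned    t = 0;          /* blocks scanned so far */
    int         m = 0;          /* blocks selected so far */

    while (m < n && t < N)
    {
        unsigned    K = N - t;  /* remaining blocks */
        int         k = n - m;  /* blocks still to select */
        double      p,
                    V;

        if ((unsigned) k >= K)
        {
            /* must take all the rest */
            printf("%u\n", t);
            t++;
            m++;
            continue;
        }

        V = random_fraction();
        p = 1.0 - (double) k / (double) K;
        while (V < p)
        {
            /* skip block t; update cumulative skip probability */
            t++;
            K--;
            p *= 1.0 - (double) k / (double) K;
        }

        /* select block t */
        printf("%u\n", t);
        t++;
        m++;
    }
}

int
main(void)
{
    srand(42);
    sample_blocks(1000, 10);
    return 0;
}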
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
double *totalrows)
{
- int numrows = 0; /* # rows collected */
- double liverows = 0; /* # rows seen */
+ int numrows = 0; /* # rows collected */
+ double liverows = 0; /* # rows seen */
double deadrows = 0;
- double rowstoskip = -1; /* -1 means not set yet */
- BlockNumber totalblocks;
+ double rowstoskip = -1; /* -1 means not set yet */
+ BlockNumber totalblocks;
BlockSamplerData bs;
double rstate;
{
/*
* The first targrows live rows are simply copied into the
- * reservoir.
- * Then we start replacing tuples in the sample until
- * we reach the end of the relation. This algorithm is
- * from Jeff Vitter's paper (see full citation below).
+ * reservoir. Then we start replacing tuples in the sample
+ * until we reach the end of the relation. This algorithm
+ * is from Jeff Vitter's paper (see full citation below).
* It works by repeatedly computing the number of tuples
* to skip before selecting a tuple, which replaces a
- * randomly chosen element of the reservoir (current
- * set of tuples). At all times the reservoir is a true
+ * randomly chosen element of the reservoir (current set
+ * of tuples). At all times the reservoir is a true
* random sample of the tuples we've passed over so far,
* so when we fall off the end of the relation we're done.
*/
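
The comment above summarizes the reservoir invariant behind acquire_sample_rows(). The sketch below demonstrates that invariant with the simpler Algorithm R replacement rule (one fresh uniform draw per row) rather than Vitter's skip counts from get_next_S(); it is illustrative only, not the backend's algorithm:

#include <stdio.h>
#include <stdlib.h>

#define TARGROWS 5

/*
 * Reservoir-sample TARGROWS values from a stream of "nvalues" integers.
 * The first TARGROWS values fill the reservoir; afterwards the i-th
 * value (0-based) replaces a random slot with probability
 * TARGROWS/(i+1), which keeps the reservoir a uniform random sample of
 * everything seen so far.  (Modulo bias of rand() is ignored here.)
 */
static void
reservoir_sample(int nvalues)
{
    int     reservoir[TARGROWS];
    int     i;

    for (i = 0; i < nvalues; i++)
    {
        if (i < TARGROWS)
            reservoir[i] = i;   /* fill phase */
        else
        {
            int     j = rand() % (i + 1);

            if (j < TARGROWS)
                reservoir[j] = i;   /* replace a random slot */
        }
    }

    for (i = 0; i < TARGROWS && i < nvalues; i++)
        printf("%d ", reservoir[i]);
    printf("\n");
}

int
main(void)
{
    srand(42);
    reservoir_sample(100);
    return 0;
}

The backend gets the same invariant while calling the random generator far less often, by computing how many rows to skip before the next replacement.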
else
{
/*
- * t in Vitter's paper is the number of records already
- * processed. If we need to compute a new S value, we
- * must use the not-yet-incremented value of liverows
- * as t.
+ * t in Vitter's paper is the number of records
+ * already processed. If we need to compute a new S
+ * value, we must use the not-yet-incremented value of
+ * liverows as t.
*/
if (rowstoskip < 0)
rowstoskip = get_next_S(liverows, targrows, &rstate);
if (rowstoskip <= 0)
{
/*
- * Found a suitable tuple, so save it,
- * replacing one old tuple at random
+ * Found a suitable tuple, so save it, replacing
+ * one old tuple at random
*/
- int k = (int) (targrows * random_fract());
+ int k = (int) (targrows * random_fract());
Assert(k >= 0 && k < targrows);
heap_freetuple(rows[k]);
else
{
/*
- * Count dead rows, but not empty slots. This information is
- * currently not used, but it seems likely we'll want it
- * someday.
+ * Count dead rows, but not empty slots. This information
+ * is currently not used, but it seems likely we'll want
+ * it someday.
*/
if (targtuple.t_data != NULL)
deadrows += 1;
}
/*
- * If we didn't find as many tuples as we wanted then we're done.
- * No sort is needed, since they're already in order.
+ * If we didn't find as many tuples as we wanted then we're done. No
+ * sort is needed, since they're already in order.
*
* Otherwise we need to sort the collected tuples by position
- * (itempointer). It's not worth worrying about corner cases
- * where the tuples are already sorted.
+ * (itempointer). It's not worth worrying about corner cases where
+ * the tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
*totalrows = 0.0;
/*
- * Emit some interesting relation info
+ * Emit some interesting relation info
*/
ereport(elevel,
(errmsg("\"%s\": scanned %d of %u pages, "
i = 0;
values[i++] = ObjectIdGetDatum(relid); /* starelid */
- values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
- values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
+ values[i++] = Int16GetDatum(stats->attr->attnum); /* staattnum */
+ values[i++] = Float4GetDatum(stats->stanullfrac); /* stanullfrac */
values[i++] = Int32GetDatum(stats->stawidth); /* stawidth */
- values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
+ values[i++] = Float4GetDatum(stats->stadistinct); /* stadistinct */
for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
{
values[i++] = Int16GetDatum(stats->stakind[k]); /* stakindN */
static void compute_minimal_stats(VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
static int compare_scalars(const void *a, const void *b);
static int compare_mcvs(const void *a, const void *b);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.114 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.115 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
static List *pendingNotifies = NIL;
-static List *upperPendingNotifies = NIL; /* list of upper-xact lists */
+static List *upperPendingNotifies = NIL; /* list of upper-xact
+ * lists */
/*
* State for inbound notifies consists of two flags: one saying whether
rTuple = heap_modifytuple(lTuple, lRel,
value, nulls, repl);
+
/*
* We cannot use simple_heap_update here because the tuple
* could have been modified by an uncommitted transaction;
* specifically, since UNLISTEN releases exclusive lock on
- * the table before commit, the other guy could already have
- * tried to unlisten. There are no other cases where we
- * should be able to see an uncommitted update or delete.
- * Therefore, our response to a HeapTupleBeingUpdated result
- * is just to ignore it. We do *not* wait for the other
- * guy to commit --- that would risk deadlock, and we don't
- * want to block while holding the table lock anyway for
- * performance reasons. We also ignore HeapTupleUpdated,
- * which could occur if the other guy commits between our
- * heap_getnext and heap_update calls.
+ * the table before commit, the other guy could already
+ * have tried to unlisten. There are no other cases where
+ * we should be able to see an uncommitted update or
+ * delete. Therefore, our response to a
+ * HeapTupleBeingUpdated result is just to ignore it. We
+ * do *not* wait for the other guy to commit --- that
+ * would risk deadlock, and we don't want to block while
+ * holding the table lock anyway for performance reasons.
+ * We also ignore HeapTupleUpdated, which could occur if
+ * the other guy commits between our heap_getnext and
+ * heap_update calls.
*/
result = heap_update(lRel, &lTuple->t_self, rTuple,
&ctid,
GetCurrentCommandId(), SnapshotAny,
- false /* no wait for commit */);
+ false /* no wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
void
AtSubStart_Notify(void)
{
- MemoryContext old_cxt;
+ MemoryContext old_cxt;
/* Keep the list-of-lists in TopTransactionContext for simplicity */
old_cxt = MemoryContextSwitchTo(TopTransactionContext);
void
AtSubCommit_Notify(void)
{
- List *parentPendingNotifies;
+ List *parentPendingNotifies;
parentPendingNotifies = (List *) linitial(upperPendingNotifies);
upperPendingNotifies = list_delete_first(upperPendingNotifies);
/*
- * We could try to eliminate duplicates here, but it seems not worthwhile.
+ * We could try to eliminate duplicates here, but it seems not
+ * worthwhile.
*/
pendingNotifies = list_concat(parentPendingNotifies, pendingNotifies);
}
bool
DisableNotifyInterrupt(void)
{
- bool result = (notifyInterruptEnabled != 0);
+ bool result = (notifyInterruptEnabled != 0);
notifyInterruptEnabled = 0;
relname, (int) sourcePID);
NotifyMyFrontEnd(relname, sourcePID);
+
/*
* Rewrite the tuple with 0 in notification column.
*
- * simple_heap_update is safe here because no one else would
- * have tried to UNLISTEN us, so there can be no uncommitted
+ * simple_heap_update is safe here because no one else would have
+ * tried to UNLISTEN us, so there can be no uncommitted
* changes.
*/
rTuple = heap_modifytuple(lTuple, lRel, value, nulls, repl);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.128 2004/08/29 04:12:29 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/cluster.c,v 1.129 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* We grab exclusive access to the target rel and index for the
* duration of the transaction. (This is redundant for the single-
- * transaction case, since cluster() already did it.) The index
- * lock is taken inside check_index_is_clusterable.
+ * transaction case, since cluster() already did it.) The index lock
+ * is taken inside check_index_is_clusterable.
*/
OldHeap = heap_open(rvtc->tableOid, AccessExclusiveLock);
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot cluster temporary tables of other sessions")));
+ errmsg("cannot cluster temporary tables of other sessions")));
/* Drop relcache refcnt on OldIndex, but keep lock */
index_close(OldIndex);
foreach(index, RelationGetIndexList(rel))
{
- Oid thisIndexOid = lfirst_oid(index);
+ Oid thisIndexOid = lfirst_oid(index);
indexTuple = SearchSysCacheCopy(INDEXRELID,
ObjectIdGetDatum(thisIndexOid),
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table,
- * which is all-new at this point). We do not need
+ * Rebuild each index on the relation (but not the toast table, which
+ * is all-new at this point). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tableOid, false);
OIDNewHeap = heap_create_with_catalog(NewName,
RelationGetNamespace(OldHeap),
- NewTableSpace,
+ NewTableSpace,
tupdesc,
OldHeap->rd_rel->relkind,
OldHeap->rd_rel->relisshared,
* their new owning relations. Otherwise the wrong one will get
* dropped ...
*
- * NOTE: it is possible that only one table has a toast table; this
- * can happen in CLUSTER if there were dropped columns in the old table,
+ * NOTE: it is possible that only one table has a toast table; this can
+ * happen in CLUSTER if there were dropped columns in the old table,
* and in ALTER TABLE when adding or changing type of columns.
*
* NOTE: at present, a TOAST table's only dependency is the one on its
/*
* Blow away the old relcache entries now. We need this kluge because
* relcache.c keeps a link to the smgr relation for the physical file,
- * and that will be out of date as soon as we do CommandCounterIncrement.
- * Whichever of the rels is the second to be cleared during cache
- * invalidation will have a dangling reference to an already-deleted smgr
- * relation. Rather than trying to avoid this by ordering operations
- * just so, it's easiest to not have the relcache entries there at all.
- * (Fortunately, since one of the entries is local in our transaction,
- * it's sufficient to clear out our own relcache this way; the problem
- * cannot arise for other backends when they see our update on the
- * non-local relation.)
+ * and that will be out of date as soon as we do
+ * CommandCounterIncrement. Whichever of the rels is the second to be
+ * cleared during cache invalidation will have a dangling reference to
+ * an already-deleted smgr relation. Rather than trying to avoid this
+ * by ordering operations just so, it's easiest to not have the
+ * relcache entries there at all. (Fortunately, since one of the
+ * entries is local in our transaction, it's sufficient to clear out
+ * our own relcache this way; the problem cannot arise for other
+ * backends when they see our update on the non-local relation.)
*/
RelationForgetRelation(r1);
RelationForgetRelation(r2);
* Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.78 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.79 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
CommentOpClass(stmt->objname, stmt->objargs, stmt->comment);
break;
case OBJECT_LARGEOBJECT:
- CommentLargeObject(stmt->objname, stmt->comment);
+ CommentLargeObject(stmt->objname, stmt->comment);
break;
case OBJECT_CAST:
- CommentCast(stmt->objname, stmt->objargs, stmt->comment);
+ CommentCast(stmt->objname, stmt->objargs, stmt->comment);
break;
default:
elog(ERROR, "unrecognized object type: %d",
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- attrname, RelationGetRelationName(relation))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ attrname, RelationGetRelationName(relation))));
/* Create the comment using the relation's oid */
/* Only allow comments on the current database */
if (oid != MyDatabaseId)
{
- ereport(WARNING, /* throw just a warning so pg_restore doesn't fail */
+ ereport(WARNING, /* throw just a warning so pg_restore
+ * doesn't fail */
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("database comments may only be applied to the current database")));
return;
ForwardScanDirection)))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("there are multiple rules named \"%s\"", rulename),
+ errmsg("there are multiple rules named \"%s\"", rulename),
errhint("Specify a relation name as well as a rule name.")));
heap_endscan(scanDesc);
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("rule \"%s\" for relation \"%s\" does not exist",
- rulename, RelationGetRelationName(relation))));
+ errmsg("rule \"%s\" for relation \"%s\" does not exist",
+ rulename, RelationGetRelationName(relation))));
Assert(reloid == ((Form_pg_rewrite) GETSTRUCT(tuple))->ev_class);
ruleoid = HeapTupleGetOid(tuple);
ReleaseSysCache(tuple);
if (!HeapTupleIsValid(triggertuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- trigname, RelationGetRelationName(relation))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ trigname, RelationGetRelationName(relation))));
oid = HeapTupleGetOid(triggertuple);
if (!OidIsValid(conOid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("constraint \"%s\" for table \"%s\" does not exist",
- conName, RelationGetRelationName(relation))));
+ errmsg("constraint \"%s\" for table \"%s\" does not exist",
+ conName, RelationGetRelationName(relation))));
/* Create the comment with the pg_constraint oid */
CreateComments(conOid, RelationGetRelid(pg_constraint), 0, comment);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to comment on procedural language")));
+ errmsg("must be superuser to comment on procedural language")));
/* pg_language doesn't have a hard-coded OID, so must look it up */
classoid = get_system_catalog_relid(LanguageRelationName);
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("operator class \"%s\" does not exist for access method \"%s\"",
- NameListToString(qualname), amname)));
+ NameListToString(qualname), amname)));
opcID = HeapTupleGetOid(tuple);
{
Oid loid;
Oid classoid;
- Node *node;
+ Node *node;
Assert(list_length(qualname) == 1);
node = (Node *) linitial(qualname);
loid = intVal(node);
break;
case T_Float:
+
/*
* Values too large for int4 will be represented as Float
- * constants by the lexer. Accept these if they are valid
- * OID strings.
+ * constants by the lexer. Accept these if they are valid OID
+ * strings.
*/
loid = DatumGetObjectId(DirectFunctionCall1(oidin,
- CStringGetDatum(strVal(node))));
+ CStringGetDatum(strVal(node))));
break;
default:
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(node));
/* keep compiler quiet */
- loid = InvalidOid;
+ loid = InvalidOid;
}
/* check that the large object exists */
classoid = get_system_catalog_relid(LargeObjectRelationName);
/* Call CreateComments() to create/drop the comments */
- CreateComments(loid, classoid, 0, comment);
+ CreateComments(loid, classoid, 0, comment);
}
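
The T_Float case above exists because OIDs larger than the int4 range (2^31-1) cannot be lexed as integer constants and arrive as string-valued Float nodes. A standalone sketch of accepting such values from their string form, with a local Oid typedef standing in for the backend's (the backend itself routes this through the oidin input function):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef uint32_t Oid;           /* local stand-in for the backend typedef */

/*
 * Parse a decimal string as an OID.  Values between 2^31 and 2^32-1 do
 * not fit in a signed int4, which is why the SQL lexer hands them back
 * as string-valued Float constants rather than Integer constants.
 */
static int
parse_oid(const char *s, Oid *result)
{
    char           *end;
    unsigned long   val;

    errno = 0;
    val = strtoul(s, &end, 10);
    if (errno != 0 || end == s || *end != '\0' || val > 0xFFFFFFFFUL)
        return 0;               /* not a valid OID string */
    *result = (Oid) val;
    return 1;
}

int
main(void)
{
    Oid     loid;

    if (parse_oid("3000000000", &loid))
        printf("oid = %lu\n", (unsigned long) loid);
    return 0;
}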
/*
Assert(list_length(arguments) == 1);
targettype = (TypeName *) linitial(arguments);
Assert(IsA(targettype, TypeName));
-
+
sourcetypeid = typenameTypeId(sourcetype);
if (!OidIsValid(sourcetypeid))
ereport(ERROR,
/* Get the OID of the cast */
castOid = HeapTupleGetOid(tuple);
-
+
/* Permission check */
if (!pg_type_ownercheck(sourcetypeid, GetUserId())
&& !pg_type_ownercheck(targettypeid, GetUserId()))
classoid = get_system_catalog_relid(CastRelationName);
/* Call CreateComments() to create/drop the comments */
- CreateComments(castOid, classoid, 0, comment);
+ CreateComments(castOid, classoid, 0, comment);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.14 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.15 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid conversionOid;
HeapTuple tup;
Relation rel;
- Form_pg_conversion convForm;
+ Form_pg_conversion convForm;
rel = heap_openr(ConversionRelationName, RowExclusiveLock);
convForm = (Form_pg_conversion) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
convForm->conowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.229 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/copy.c,v 1.230 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *delim, char *null_print, bool csv_mode, char *quote,
char *escape, List *force_quote_atts, bool fe_copy);
static void CopyTo(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
+ char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_quote_atts);
static void CopyFrom(Relation rel, List *attnumlist, bool binary, bool oids,
- char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
+ char *delim, char *null_print, bool csv_mode, char *quote, char *escape,
List *force_notnull_atts);
static bool CopyReadLine(void);
static char *CopyReadAttribute(const char *delim, const char *null_print,
- CopyReadResult *result, bool *isnull);
+ CopyReadResult *result, bool *isnull);
static char *CopyReadAttributeCSV(const char *delim, const char *null_print,
- char *quote, char *escape,
- CopyReadResult *result, bool *isnull);
+ char *quote, char *escape,
+ CopyReadResult *result, bool *isnull);
static Datum CopyReadBinaryAttribute(int column_no, FmgrInfo *flinfo,
Oid typioparam, bool *isnull);
static void CopyAttributeOut(char *string, char *delim);
static void CopyAttributeOutCSV(char *string, char *delim, char *quote,
- char *escape, bool force_quote);
+ char *escape, bool force_quote);
static List *CopyGetAttnums(Relation rel, List *attnamelist);
static void limit_printout_length(StringInfo buf);
/* Try to receive another message */
int mtype;
- readmessage:
+ readmessage:
mtype = pq_getbyte();
if (mtype == EOF)
ereport(ERROR,
break;
case 'H': /* Flush */
case 'S': /* Sync */
+
/*
* Ignore Flush/Sync for the convenience of
* client libraries (such as libpq) that may
- * send those without noticing that the command
- * they just sent was COPY.
+ * send those without noticing that the
+ * command they just sent was COPY.
*/
goto readmessage;
default:
bool fe_copy = false;
bool binary = false;
bool oids = false;
- bool csv_mode = false;
+ bool csv_mode = false;
char *delim = NULL;
char *quote = NULL;
char *escape = NULL;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- force_quote = (List *)defel->arg;
+ force_quote = (List *) defel->arg;
}
else if (strcmp(defel->defname, "force_notnull") == 0)
{
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting or redundant options")));
- force_notnull = (List *)defel->arg;
+ force_notnull = (List *) defel->arg;
}
else
elog(ERROR, "option \"%s\" not recognized",
/* Set defaults */
if (!delim)
delim = csv_mode ? "," : "\t";
-
+
if (!null_print)
null_print = csv_mode ? "" : "\\N";
if (!escape)
escape = quote;
}
-
+
/*
* Only single-character delimiter strings are supported.
*/
if (force_quote != NIL && is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force quote only available using COPY TO")));
+ errmsg("COPY force quote only available using COPY TO")));
/*
* Check force_notnull
if (!csv_mode && force_notnull != NIL)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null available only in CSV mode")));
+ errmsg("COPY force not null available only in CSV mode")));
if (force_notnull != NIL && !is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY force not null only available using COPY FROM")));
+ errmsg("COPY force not null only available using COPY FROM")));
/*
* Don't allow the delimiter to appear in the null string.
if (!list_member_int(attnumlist, attnum))
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
- errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
- NameStr(attr[attnum - 1]->attname))));
+ errmsg("FORCE QUOTE column \"%s\" not referenced by COPY",
+ NameStr(attr[attnum - 1]->attname))));
}
}
-
+
/*
* Check that FORCE NOT NULL references valid COPY columns
*/
NameStr(attr[attnum - 1]->attname))));
}
}
-
+
/*
* Set up variables to avoid per-attribute overhead.
*/
PG_CATCH();
{
/*
- * Make sure we turn off old-style COPY OUT mode upon error.
- * It is okay to do this in all cases, since it does nothing
- * if the mode is not on.
+ * Make sure we turn off old-style COPY OUT mode upon error. It is
+ * okay to do this in all cases, since it does nothing if the mode
+ * is not on.
*/
pq_endcopyout(true);
PG_RE_THROW();
{
int attnum = lfirst_int(cur);
Oid out_func_oid;
-
+
if (binary)
getTypeBinaryOutputInfo(attr[attnum - 1]->atttypid,
- &out_func_oid, &typioparams[attnum - 1],
+ &out_func_oid, &typioparams[attnum - 1],
&isvarlena[attnum - 1]);
else
getTypeOutputInfo(attr[attnum - 1]->atttypid,
while ((tuple = heap_getnext(scandesc, ForwardScanDirection)) != NULL)
{
bool need_delim = false;
+
CHECK_FOR_INTERRUPTS();
MemoryContextReset(mycontext);
{
string = DatumGetCString(FunctionCall3(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(typioparams[attnum - 1]),
+ ObjectIdGetDatum(typioparams[attnum - 1]),
Int32GetDatum(attr[attnum - 1]->atttypmod)));
if (csv_mode)
{
CopyAttributeOutCSV(string, delim, quote, escape,
- (strcmp(string, null_print) == 0 ||
- force_quote[attnum - 1]));
+ (strcmp(string, null_print) == 0 ||
+ force_quote[attnum - 1]));
}
else
CopyAttributeOut(string, delim);
outputbytes = DatumGetByteaP(FunctionCall2(&out_functions[attnum - 1],
value,
- ObjectIdGetDatum(typioparams[attnum - 1])));
+ ObjectIdGetDatum(typioparams[attnum - 1])));
/* We assume the result will not have been toasted */
CopySendInt32(VARSIZE(outputbytes) - VARHDRSZ);
CopySendData(VARDATA(outputbytes),
{
#define MAX_COPY_DATA_DISPLAY 100
- int len;
+ int len;
/* Fast path if definitely okay */
if (buf->len <= MAX_COPY_DATA_DISPLAY)
/* Fetch the input function and typioparam info */
if (binary)
getTypeBinaryInputInfo(attr[attnum - 1]->atttypid,
- &in_func_oid, &typioparams[attnum - 1]);
+ &in_func_oid, &typioparams[attnum - 1]);
else
getTypeInputInfo(attr[attnum - 1]->atttypid,
&in_func_oid, &typioparams[attnum - 1]);
force_notnull[attnum - 1] = true;
else
force_notnull[attnum - 1] = false;
-
+
/* Get default info if needed */
if (!list_member_int(attnumlist, attnum))
{
COERCE_IMPLICIT_CAST, false);
constraintexprs[attnum - 1] = ExecPrepareExpr((Expr *) node,
- estate);
+ estate);
hasConstraints = true;
}
}
done = CopyReadLine();
/*
- * EOF at start of line means we're done. If we see EOF
- * after some characters, we act as though it was newline
- * followed by EOF, ie, process the line and then exit loop
- * on next iteration.
+ * EOF at start of line means we're done. If we see EOF after
+ * some characters, we act as though it was newline followed
+ * by EOF, ie, process the line and then exit loop on next
+ * iteration.
*/
if (done && line_buf.len == 0)
break;
if (csv_mode)
{
string = CopyReadAttributeCSV(delim, null_print, quote,
- escape, &result, &isnull);
+ escape, &result, &isnull);
if (result == UNTERMINATED_FIELD)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("unterminated CSV quoted field")));
+ errmsg("unterminated CSV quoted field")));
}
else
- string = CopyReadAttribute(delim, null_print,
+ string = CopyReadAttribute(delim, null_print,
&result, &isnull);
if (csv_mode && isnull && force_notnull[m])
{
- string = null_print; /* set to NULL string */
+ string = null_print; /* set to NULL string */
isnull = false;
}
- /* we read an SQL NULL, no need to do anything */
+ /* we read an SQL NULL, no need to do anything */
if (!isnull)
{
copy_attname = NameStr(attr[m]->attname);
values[m] = FunctionCall3(&in_functions[m],
CStringGetDatum(string),
- ObjectIdGetDatum(typioparams[m]),
+ ObjectIdGetDatum(typioparams[m]),
Int32GetDatum(attr[m]->atttypmod));
nulls[m] = ' ';
copy_attname = NULL;
if (result == NORMAL_ATTR && line_buf.len != 0)
ereport(ERROR,
(errcode(ERRCODE_BAD_COPY_FILE_FORMAT),
- errmsg("extra data after last expected column")));
+ errmsg("extra data after last expected column")));
}
else
{
copy_attname = "oid";
loaded_oid =
DatumGetObjectId(CopyReadBinaryAttribute(0,
- &oid_in_function,
- oid_typioparam,
+ &oid_in_function,
+ oid_typioparam,
&isnull));
if (isnull || loaded_oid == InvalidOid)
ereport(ERROR,
result = false;
/*
- * In this loop we only care for detecting newlines (\r and/or \n)
- * and the end-of-copy marker (\.). For backwards compatibility
- * we allow backslashes to escape newline characters. Backslashes
- * other than the end marker get put into the line_buf, since
- * CopyReadAttribute does its own escape processing. These four
- * characters, and only these four, are assumed the same in frontend
- * and backend encodings. We do not assume that second and later bytes
- * of a frontend multibyte character couldn't look like ASCII characters.
+ * In this loop we only care for detecting newlines (\r and/or \n) and
+ * the end-of-copy marker (\.). For backwards compatibility we allow
+ * backslashes to escape newline characters. Backslashes other than
+ * the end marker get put into the line_buf, since CopyReadAttribute
+ * does its own escape processing. These four characters, and only
+ * these four, are assumed the same in frontend and backend encodings.
+ * We do not assume that second and later bytes of a frontend
+ * multibyte character couldn't look like ASCII characters.
*/
for (;;)
{
errmsg("end-of-copy marker does not match previous newline style")));
/*
- * In protocol version 3, we should ignore anything
- * after \. up to the protocol end of copy data. (XXX
- * maybe better not to treat \. as special?)
+ * In protocol version 3, we should ignore anything after
+ * \. up to the protocol end of copy data. (XXX maybe
+ * better not to treat \. as special?)
*/
if (copy_dest == COPY_NEW_FE)
{
/*
* When client encoding != server, must be careful to read the
- * extra bytes of a multibyte character exactly, since the encoding
- * might not ensure they don't look like ASCII. When the encodings
- * are the same, we need not do this, since no server encoding we
- * use has ASCII-like following bytes.
+ * extra bytes of a multibyte character exactly, since the
+ * encoding might not ensure they don't look like ASCII. When the
+ * encodings are the same, we need not do this, since no server
+ * encoding we use has ASCII-like following bytes.
*/
if (change_encoding)
{
if (result)
break; /* out of outer loop */
}
- } /* end of outer loop */
+ } /* end of outer loop */
/*
* Done reading the line. Convert it to server encoding.
* Note: set line_buf_converted to true *before* attempting conversion;
* this prevents infinite recursion during error reporting should
* pg_client_to_server() issue an error, due to copy_in_error_callback
- * again attempting the same conversion. We'll end up issuing the message
- * without conversion, which is bad but better than nothing ...
+ * again attempting the same conversion. We'll end up issuing the
+ * message without conversion, which is bad but better than nothing
+ * ...
*/
line_buf_converted = true;
case 'v':
c = '\v';
break;
- /*
- * in all other cases, take the char after '\' literally
- */
+
+ /*
+ * in all other cases, take the char after '\'
+ * literally
+ */
}
}
appendStringInfoCharMacro(&attribute_buf, c);
/*
- * Read the value of a single attribute in CSV mode,
+ * Read the value of a single attribute in CSV mode,
* performing de-escaping as needed. Escaping does not follow the normal
* PostgreSQL text mode, but instead "standard" (i.e. common) CSV usage.
*
* *result is set to indicate what terminated the read:
* NORMAL_ATTR: column delimiter
* END_OF_LINE: end of line
- * UNTERMINATED_FIELD no quote detected at end of a quoted field
+ * UNTERMINATED_FIELD no quote detected at end of a quoted field
*
* In any case, the string read up to the terminator (or end of file)
* is returned.
CopyReadAttributeCSV(const char *delim, const char *null_print, char *quote,
char *escape, CopyReadResult *result, bool *isnull)
{
- char delimc = delim[0];
- char quotec = quote[0];
- char escapec = escape[0];
+ char delimc = delim[0];
+ char quotec = quote[0];
+ char escapec = escape[0];
char c;
int start_cursor = line_buf.cursor;
int end_cursor = start_cursor;
int input_len;
- bool in_quote = false;
- bool saw_quote = false;
+ bool in_quote = false;
+ bool saw_quote = false;
/* reset attribute_buf to empty */
attribute_buf.len = 0;
/* handle multiline quoted fields */
if (in_quote && line_buf.cursor >= line_buf.len)
{
- bool done;
+ bool done;
- switch(eol_type)
+ switch (eol_type)
{
case EOL_NL:
- appendStringInfoString(&attribute_buf,"\n");
+ appendStringInfoString(&attribute_buf, "\n");
break;
case EOL_CR:
- appendStringInfoString(&attribute_buf,"\r");
+ appendStringInfoString(&attribute_buf, "\r");
break;
case EOL_CRNL:
- appendStringInfoString(&attribute_buf,"\r\n");
+ appendStringInfoString(&attribute_buf, "\r\n");
break;
case EOL_UNKNOWN:
/* shouldn't happen - just keep going */
if (line_buf.cursor >= line_buf.len)
break;
c = line_buf.data[line_buf.cursor++];
- /*
- * unquoted field delimiter
+
+ /*
+ * unquoted field delimiter
*/
if (!in_quote && c == delimc)
{
*result = NORMAL_ATTR;
break;
}
- /*
- * start of quoted field (or part of field)
+
+ /*
+ * start of quoted field (or part of field)
*/
if (!in_quote && c == quotec)
{
in_quote = true;
continue;
}
- /*
+
+ /*
* escape within a quoted field
*/
if (in_quote && c == escapec)
{
- /*
- * peek at the next char if available, and escape it if it
- * is an escape char or a quote char
+ /*
+ * peek at the next char if available, and escape it if it is
+ * an escape char or a quote char
*/
if (line_buf.cursor <= line_buf.len)
{
- char nextc = line_buf.data[line_buf.cursor];
+ char nextc = line_buf.data[line_buf.cursor];
+
if (nextc == escapec || nextc == quotec)
{
appendStringInfoCharMacro(&attribute_buf, nextc);
}
}
}
+
/*
- * end of quoted field.
- * Must do this test after testing for escape in case quote char
- * and escape char are the same (which is the common case).
+ * end of quoted field. Must do this test after testing for escape
+ * in case quote char and escape char are the same (which is the
+ * common case).
*/
if (in_quote && c == quotec)
{
}
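
Testing for the closing quote only after testing for the escape character is what makes the common CSV convention work: the default escape character is the quote itself, so a doubled quote inside a quoted field is a literal quote. A self-contained sketch of that de-escaping rule, using a hypothetical read_csv_field() that assumes '"' for both quote and escape and ',' as delimiter (it does not report unterminated fields the way CopyReadAttributeCSV does):

#include <stdio.h>

/*
 * Copy one CSV field from "src" into "dst".  The quote and escape
 * characters are both '"' and the delimiter is ','.  Returns a pointer
 * just past the field (at the delimiter or at the terminating '\0').
 */
static const char *
read_csv_field(const char *src, char *dst)
{
    int     in_quote = 0;

    for (;;)
    {
        char    c = *src;

        if (c == '\0')
            break;
        if (!in_quote && c == ',')
            break;              /* unquoted delimiter ends the field */
        src++;
        if (c == '"')
        {
            if (!in_quote)
            {
                in_quote = 1;   /* start of quoted section */
                continue;
            }
            if (*src == '"')
            {
                *dst++ = '"';   /* doubled quote: literal quote char */
                src++;
                continue;
            }
            in_quote = 0;       /* closing quote */
            continue;
        }
        *dst++ = c;
    }
    *dst = '\0';
    return src;
}

int
main(void)
{
    char    field[64];

    read_csv_field("\"say \"\"hi\"\", please\",next", field);
    printf("[%s]\n", field);    /* prints [say "hi", please] */
    return 0;
}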
/*
- * Send CSV representation of one attribute, with conversion and
+ * Send CSV representation of one attribute, with conversion and
* CSV type escaping
*/
static void
char *string;
char c;
char delimc = delim[0];
- char quotec = quote[0];
- char escapec = escape[0];
- char *test_string;
+ char quotec = quote[0];
+ char escapec = escape[0];
+ char *test_string;
bool same_encoding;
int mblen;
int i;
else
string = server_string;
- /* have to run through the string twice,
- * first time to see if it needs quoting, second to actually send it
+ /*
+ * have to run through the string twice, first time to see if it needs
+ * quoting, second to actually send it
*/
- for(test_string = string;
- !use_quote && (c = *test_string) != '\0';
- test_string += mblen)
+ for (test_string = string;
+ !use_quote && (c = *test_string) != '\0';
+ test_string += mblen)
{
if (c == delimc || c == quotec || c == '\n' || c == '\r')
use_quote = true;
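
The two-pass structure in the hunk above (first decide whether the value needs quoting at all, then emit it) can be shown in miniature. The sketch below handles single-byte characters only and hard-codes '"' as both quote and escape, unlike the encoding-aware backend loop:

#include <stdio.h>

/*
 * Write one CSV value to stdout.  Pass 1 decides whether the value must
 * be quoted (it contains the delimiter, the quote character, or a
 * newline); pass 2 writes it, doubling any embedded quote characters.
 */
static void
write_csv_value(const char *value, char delim)
{
    int         use_quote = 0;
    const char *p;

    for (p = value; *p != '\0'; p++)
    {
        if (*p == delim || *p == '"' || *p == '\n' || *p == '\r')
        {
            use_quote = 1;
            break;
        }
    }

    if (!use_quote)
    {
        fputs(value, stdout);
        return;
    }

    putchar('"');
    for (p = value; *p != '\0'; p++)
    {
        if (*p == '"')
            putchar('"');       /* escape by doubling */
        putchar(*p);
    }
    putchar('"');
}

int
main(void)
{
    write_csv_value("plain", ',');
    putchar(',');
    write_csv_value("needs, quoting \"here\"", ',');
    putchar('\n');              /* prints: plain,"needs, quoting ""here""" */
    return 0;
}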
if (list_member_int(attnums, attnum))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
attnums = lappend_int(attnums, attnum);
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.140 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/dbcommands.c,v 1.141 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid dboid;
AclId datdba;
ListCell *option;
- DefElem *dtablespacename = NULL;
+ DefElem *dtablespacename = NULL;
DefElem *downer = NULL;
DefElem *dtemplate = NULL;
DefElem *dencoding = NULL;
char *dbowner = NULL;
char *dbtemplate = NULL;
int encoding = -1;
+
#ifndef WIN32
char buf[2 * MAXPGPATH + 100];
#endif
&src_vacuumxid, &src_frozenxid, &src_deftablespace))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_DATABASE),
- errmsg("template database \"%s\" does not exist", dbtemplate)));
+ errmsg("template database \"%s\" does not exist", dbtemplate)));
/*
* Permission check: to copy a DB that's not marked datistemplate, you
if (dtablespacename && dtablespacename->arg)
{
char *tablespacename;
- AclResult aclresult;
+ AclResult aclresult;
tablespacename = strVal(dtablespacename->arg);
dst_deftablespace = get_tablespace_oid(tablespacename);
errmsg("tablespace \"%s\" does not exist",
tablespacename)));
/* check permissions */
- aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
+ aclresult = pg_tablespace_aclcheck(dst_deftablespace, GetUserId(),
ACL_CREATE);
- if (aclresult != ACLCHECK_OK)
- aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
- tablespacename);
+ if (aclresult != ACLCHECK_OK)
+ aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
+ tablespacename);
}
else
{
closeAllVfds();
/*
- * Iterate through all tablespaces of the template database, and
- * copy each one to the new database.
+ * Iterate through all tablespaces of the template database, and copy
+ * each one to the new database.
*
- * If we are trying to change the default tablespace of the template,
- * we require that the template not have any files in the new default
- * tablespace. This avoids the need to merge two subdirectories.
- * This could probably be improved later.
+ * If we are trying to change the default tablespace of the template, we
+ * require that the template not have any files in the new default
+ * tablespace. This avoids the need to merge two subdirectories. This
+ * could probably be improved later.
*/
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid srctablespace = HeapTupleGetOid(tuple);
- Oid dsttablespace;
- char *srcpath;
- char *dstpath;
+ Oid srctablespace = HeapTupleGetOid(tuple);
+ Oid dsttablespace;
+ char *srcpath;
+ char *dstpath;
struct stat st;
/* No need to copy global tablespace */
remove_dbtablespaces(dboid);
ereport(ERROR,
(errmsg("could not initialize database directory"),
- errdetail("Directory \"%s\" already exists.", dstpath)));
+ errdetail("Directory \"%s\" already exists.", dstpath)));
}
#ifndef WIN32
+
/*
* Copy this subdirectory to the new location
*
errdetail("Failing system command was: %s", buf),
errhint("Look in the postmaster's stderr log for more information.")));
}
-#else /* WIN32 */
+#else /* WIN32 */
if (copydir(srcpath, dstpath) != 0)
{
/* copydir should already have given details of its troubles */
ereport(ERROR,
(errmsg("could not initialize database directory")));
}
-#endif /* WIN32 */
+#endif /* WIN32 */
}
heap_endscan(scan);
heap_close(rel, AccessShareLock);
Relation rel;
ScanKeyData scankey;
SysScanDesc scan;
- Form_pg_database datForm;
+ Form_pg_database datForm;
rel = heap_openr(DatabaseRelationName, RowExclusiveLock);
ScanKeyInit(&scankey,
datForm = (Form_pg_database) GETSTRUCT(tuple);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
- * command to have succeeded. This is to be consistent with other objects.
+ * command to have succeeded. This is to be consistent with other
+ * objects.
*/
if (datForm->datdba != newOwnerSysId)
{
Datum repl_val[Natts_pg_database];
char repl_null[Natts_pg_database];
char repl_repl[Natts_pg_database];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tuple,
- Anum_pg_database_datacl,
- RelationGetDescr(rel),
- &isNull);
+ Anum_pg_database_datacl,
+ RelationGetDescr(rel),
+ &isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),
static void
remove_dbtablespaces(Oid db_id)
{
- Relation rel;
+ Relation rel;
HeapScanDesc scan;
- HeapTuple tuple;
+ HeapTuple tuple;
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
scan = heap_beginscan(rel, SnapshotNow, 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid dsttablespace = HeapTupleGetOid(tuple);
- char *dstpath;
+ Oid dsttablespace = HeapTupleGetOid(tuple);
+ char *dstpath;
struct stat st;
/* Don't mess with the global tablespace */
if (!rmtree(dstpath, true))
{
ereport(WARNING,
- (errmsg("could not remove database directory \"%s\"",
- dstpath),
- errhint("Look in the postmaster's stderr log for more information.")));
+ (errmsg("could not remove database directory \"%s\"",
+ dstpath),
+ errhint("Look in the postmaster's stderr log for more information.")));
}
pfree(dstpath);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.90 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/define.c,v 1.91 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
defGetBoolean(DefElem *def)
{
/*
- * Presently, boolean flags must simply be present or absent.
- * Later we could allow 'flag = t', 'flag = f', etc.
+ * Presently, boolean flags must simply be present or absent. Later we
+ * could allow 'flag = t', 'flag = f', etc.
*/
if (def->arg == NULL)
return true;
case T_TypeName:
/* cope if grammar chooses to believe "variable" is a typename */
if (pg_strcasecmp(TypeNameToString((TypeName *) def->arg),
- "variable") == 0)
+ "variable") == 0)
return -1; /* variable length */
break;
case T_List:
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.123 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/explain.c,v 1.124 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Plan *outer_plan,
int indent, ExplainState *es)
{
- ListCell *l;
+ ListCell *l;
char *pname;
int i;
* functioncmds.c
*
* Routines for CREATE and DROP FUNCTION commands and CREATE and DROP
- * CAST commands.
+ * CAST commands.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.51 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/functioncmds.c,v 1.52 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* These routines take the parse tree and pick out the
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
errmsg("language \"%s\" does not exist", languageName),
- (strcmp(languageName, "plperl") == 0 ||
- strcmp(languageName, "plperlu") == 0 ||
- strcmp(languageName, "plpgsql") == 0 ||
- strcmp(languageName, "plpythonu") == 0 ||
- strcmp(languageName, "pltcl") == 0 ||
- strcmp(languageName, "pltclu") == 0) ?
+ (strcmp(languageName, "plperl") == 0 ||
+ strcmp(languageName, "plperlu") == 0 ||
+ strcmp(languageName, "plpgsql") == 0 ||
+ strcmp(languageName, "plpythonu") == 0 ||
+ strcmp(languageName, "pltcl") == 0 ||
+ strcmp(languageName, "pltclu") == 0) ?
errhint("You need to use \"createlang\" to load the language into the database.") : 0));
-
+
languageOid = HeapTupleGetOid(languageTuple);
languageStruct = (Form_pg_language) GETSTRUCT(languageTuple);
&prorettype, &returnsSet);
parameterCount = examine_parameter_list(stmt->parameters, languageOid,
- parameterTypes, parameterNames);
+ parameterTypes, parameterNames);
compute_attributes_with_style(stmt->withClause, &isStrict, &volatility);
procOid = LookupFuncNameTypeNames(name, argtypes, false);
tup = SearchSysCache(PROCOID,
- ObjectIdGetDatum(procOid),
- 0, 0, 0);
+ ObjectIdGetDatum(procOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for function %u", procOid);
procForm = (Form_pg_proc) GETSTRUCT(tup);
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is an aggregate function",
NameListToString(name)),
- errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));
+ errhint("Use ALTER AGGREGATE to change owner of aggregate functions.")));
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
Datum repl_val[Natts_pg_proc];
char repl_null[Natts_pg_proc];
char repl_repl[Natts_pg_proc];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
if (nargs < 1 || nargs > 3)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("cast function must take one to three arguments")));
+ errmsg("cast function must take one to three arguments")));
if (procstruct->proargtypes[0] != sourcetypeid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.124 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/indexcmds.c,v 1.125 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* non-export function prototypes */
static void CheckPredicate(Expr *predicate);
static void ComputeIndexAttrs(IndexInfo *indexInfo, Oid *classOidP,
- List *attList,
- Oid relId,
- char *accessMethodName, Oid accessMethodId,
- bool isconstraint);
+ List *attList,
+ Oid relId,
+ char *accessMethodName, Oid accessMethodId,
+ bool isconstraint);
static Oid GetIndexOpClass(List *opclass, Oid attrType,
char *accessMethodName, Oid accessMethodId);
static Oid GetDefaultOpClass(Oid attrType, Oid accessMethodId);
* Verify we (still) have CREATE rights in the rel's namespace.
* (Presumably we did when the rel was created, but maybe not
* anymore.) Skip check if caller doesn't want it. Also skip check
- * if bootstrapping, since permissions machinery may not be working yet.
+ * if bootstrapping, since permissions machinery may not be working
+ * yet.
*/
if (check_rights && !IsBootstrapProcessingMode())
{
/* Determine tablespace to use */
if (tableSpaceName)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(tableSpaceName);
if (!OidIsValid(tablespaceId))
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
tableSpaceName);
- } else {
+ }
+ else
+ {
/* Use the parent rel's tablespace */
tablespaceId = get_rel_tablespace(relationId);
/* Note there is no additional permission check in this path */
/*
* If ALTER TABLE, check that there isn't already a PRIMARY KEY.
- * In CREATE TABLE, we have faith that the parser rejected multiple
- * pkey clauses; and CREATE INDEX doesn't have a way to say
- * PRIMARY KEY, so it's no problem either.
+ * In CREATE TABLE, we have faith that the parser rejected
+ * multiple pkey clauses; and CREATE INDEX doesn't have a way to
+ * say PRIMARY KEY, so it's no problem either.
*/
if (is_alter_table &&
relationHasPrimaryKey(rel))
}
/*
- * Check that all of the attributes in a primary key are marked as not
- * null, otherwise attempt to ALTER TABLE .. SET NOT NULL
+ * Check that all of the attributes in a primary key are marked as
+ * not null, otherwise attempt to ALTER TABLE .. SET NOT NULL
*/
cmds = NIL;
foreach(keys, attributeList)
if (!((Form_pg_attribute) GETSTRUCT(atttuple))->attnotnull)
{
/* Add a subcommand to make this one NOT NULL */
- AlterTableCmd *cmd = makeNode(AlterTableCmd);
+ AlterTableCmd *cmd = makeNode(AlterTableCmd);
cmd->subtype = AT_SetNotNull;
cmd->name = key->name;
}
/*
- * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade
- * to child tables? Currently, since the PRIMARY KEY
- * itself doesn't cascade, we don't cascade the
- * notnull constraint(s) either; but this is pretty debatable.
+ * XXX: Shouldn't the ALTER TABLE .. SET NOT NULL cascade to child
+ * tables? Currently, since the PRIMARY KEY itself doesn't
+ * cascade, we don't cascade the notnull constraint(s) either; but
+ * this is pretty debatable.
*
- * XXX: possible future improvement: when being called from
- * ALTER TABLE, it would be more efficient to merge this with
- * the outer ALTER TABLE, so as to avoid two scans. But that
- * seems to complicate DefineIndex's API unduly.
+ * XXX: possible future improvement: when being called from ALTER
+ * TABLE, it would be more efficient to merge this with the outer
+ * ALTER TABLE, so as to avoid two scans. But that seems to
+ * complicate DefineIndex's API unduly.
*/
if (cmds)
AlterTableInternal(relationId, cmds, false);
heap_close(rel, NoLock);
/*
- * Report index creation if appropriate (delay this till after most
- * of the error checks)
+ * Report index creation if appropriate (delay this till after most of
+ * the error checks)
*/
if (isconstraint && !quiet)
ereport(NOTICE,
(errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
- is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
+ is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
primary ? "PRIMARY KEY" : "UNIQUE",
- indexRelationName, RelationGetRelationName(rel))));
+ indexRelationName, RelationGetRelationName(rel))));
index_create(relationId, indexRelationName,
indexInfo, accessMethodId, tablespaceId, classObjectId,
if (isconstraint)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" named in key does not exist",
- attribute->name)));
+ errmsg("column \"%s\" named in key does not exist",
+ attribute->name)));
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
if (contain_subplans(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in index expression")));
+ errmsg("cannot use subquery in index expression")));
if (contain_agg_clause(attribute->expr))
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in index expression")));
+ errmsg("cannot use aggregate function in index expression")));
/*
* A expression using mutable functions is probably wrong,
* than one exact match, then someone put bogus entries in pg_opclass.
*
* The initial search is done by namespace.c so that we only consider
- * opclasses visible in the current namespace search path. (See also
+ * opclasses visible in the current namespace search path. (See also
* typcache.c, which applies the same logic, but over all opclasses.)
*/
for (opclass = OpclassGetCandidates(accessMethodId);
* separate transaction, so we can release the lock on it right away.
*/
void
-ReindexDatabase(const char *dbname, bool force /* currently unused */,
+ReindexDatabase(const char *dbname, bool force /* currently unused */ ,
bool all)
{
- Relation relationRelation;
+ Relation relationRelation;
HeapScanDesc scan;
- HeapTuple tuple;
+ HeapTuple tuple;
MemoryContext private_context;
MemoryContext old;
- List *relids = NIL;
- ListCell *l;
+ List *relids = NIL;
+ ListCell *l;
AssertArg(dbname);
/*
* We always want to reindex pg_class first. This ensures that if
* there is any corruption in pg_class' indexes, they will be fixed
- * before we process any other tables. This is critical because
+ * before we process any other tables. This is critical because
* reindexing itself will try to update pg_class.
*/
old = MemoryContextSwitchTo(private_context);
CommitTransactionCommand();
foreach(l, relids)
{
- Oid relid = lfirst_oid(l);
+ Oid relid = lfirst_oid(l);
StartTransactionCommand();
SetQuerySnapshot(); /* might be needed for functions in indexes */
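
The ReindexDatabase hunks above follow the pattern the comment describes: the target relation OIDs are collected into a private memory context (so the list survives transaction boundaries), pg_class is forced to the head of the list so catalog index corruption is repaired first, and each relation is then reindexed in its own transaction so its lock is released right away. A minimal standalone C sketch of only that control flow; begin_tx/commit_tx/reindex_one and the OID values are invented stand-ins, not backend APIs.

#include <stdio.h>

/* hypothetical stand-ins for StartTransactionCommand() and friends */
static void begin_tx(void)  { printf("BEGIN\n"); }
static void commit_tx(void) { printf("COMMIT\n"); }

static void reindex_one(unsigned int relid)
{
    printf("  reindex relation %u\n", relid);
}

int main(void)
{
    /*
     * The catalog-like relation comes first, then the rest.  In the backend
     * this list lives in a private memory context so it outlives the
     * per-relation transactions below.
     */
    unsigned int relids[] = {1259 /* pg_class-ish */, 16384, 16390, 16401};
    size_t       i, n = sizeof(relids) / sizeof(relids[0]);

    /* the outer transaction only built the list; finish it first */
    commit_tx();

    for (i = 0; i < n; i++)
    {
        begin_tx();             /* one transaction per relation ... */
        reindex_one(relids[i]);
        commit_tx();            /* ... so its lock is dropped immediately */
    }
    return 0;
}
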
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.27 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/opclasscmds.c,v 1.28 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errmsg("could not make operator class \"%s\" be default for type %s",
opcname,
TypeNameToString(stmt->datatype)),
- errdetail("Operator class \"%s\" already is the default.",
- NameStr(opclass->opcname))));
+ errdetail("Operator class \"%s\" already is the default.",
+ NameStr(opclass->opcname))));
}
systable_endscan(scan);
if (optup == NULL)
elog(ERROR, "cache lookup failed for operator %u", operOid);
opform = (Form_pg_operator) GETSTRUCT(optup);
+
/*
* btree operators must be binary ops returning boolean, and the
* left-side input type must match the operator class' input type.
if (opform->oprleft != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree operators must have index type as left input")));
+ errmsg("btree operators must have index type as left input")));
+
/*
- * The subtype is "default" (0) if oprright matches the operator class,
- * otherwise it is oprright.
+ * The subtype is "default" (0) if oprright matches the operator
+ * class, otherwise it is oprright.
*/
if (opform->oprright == typeoid)
subtype = InvalidOid;
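
The two comments reflowed above state the same rule for btree operators and support procedures: the subtype is "default" (InvalidOid, i.e. 0) when the right-hand input type matches the operator class's input type, and is that right-hand type otherwise. A minimal sketch of the decision, using a bare unsigned int as a stand-in for Oid and illustrative type OIDs:

#include <stdio.h>

typedef unsigned int Oid;       /* stand-in for the backend typedef */
#define InvalidOid ((Oid) 0)

/*
 * Subtype is "default" (0) when the second input type matches the operator
 * class's input type, otherwise it is that second type.
 */
static Oid
subtype_for(Oid opclass_intype, Oid right_intype)
{
    return (right_intype == opclass_intype) ? InvalidOid : right_intype;
}

int main(void)
{
    Oid int4 = 23, int8 = 20;   /* illustrative type OIDs */

    printf("int4 vs int4 -> subtype %u\n", subtype_for(int4, int4)); /* 0  */
    printf("int4 vs int8 -> subtype %u\n", subtype_for(int4, int8)); /* 20 */
    return 0;
}
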
if (proctup == NULL)
elog(ERROR, "cache lookup failed for function %u", procOid);
procform = (Form_pg_proc) GETSTRUCT(proctup);
+
/*
* btree support procs must be 2-arg procs returning int4, and the
* first input type must match the operator class' input type.
if (procform->proargtypes[0] != typeoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("btree procedures must have index type as first input")));
+ errmsg("btree procedures must have index type as first input")));
+
/*
- * The subtype is "default" (0) if second input type matches the operator
- * class, otherwise it is the second input type.
+ * The subtype is "default" (0) if second input type matches the
+ * operator class, otherwise it is the second input type.
*/
if (procform->proargtypes[1] == typeoid)
subtype = InvalidOid;
if (isProc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("procedure number %d appears more than once",
- member->number)));
+ errmsg("procedure number %d appears more than once",
+ member->number)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("operator number %d appears more than once",
- member->number)));
+ errmsg("operator number %d appears more than once",
+ member->number)));
}
}
*list = lappend(*list, member);
char *opcname;
HeapTuple tup;
Relation rel;
- Form_pg_opclass opcForm;
+ Form_pg_opclass opcForm;
amOid = GetSysCacheOid(AMNAME,
CStringGetDatum(access_method),
}
opcForm = (Form_pg_opclass) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
opcForm->opcowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.18 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/operatorcmds.c,v 1.19 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
Oid operOid;
HeapTuple tup;
Relation rel;
- Form_pg_operator oprForm;
+ Form_pg_operator oprForm;
rel = heap_openr(OperatorRelationName, RowExclusiveLock);
false);
tup = SearchSysCacheCopy(OPEROID,
- ObjectIdGetDatum(operOid),
- 0, 0, 0);
+ ObjectIdGetDatum(operOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup)) /* should not happen */
elog(ERROR, "cache lookup failed for operator %u", operOid);
oprForm = (Form_pg_operator) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on tup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on tup because it's a
+ * copy
+ */
oprForm->oprowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
heap_freetuple(tup);
}
-
-
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.32 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.33 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Also copy the outer portal's parameter list into the inner portal's
- * memory context. We want to pass down the parameter values in case
- * we had a command like
- * DECLARE c CURSOR FOR SELECT ... WHERE foo = $1
- * This will have been parsed using the outer parameter set and the
+ * memory context. We want to pass down the parameter values in case
+ * we had a command like DECLARE c CURSOR FOR SELECT ... WHERE foo =
+ * $1. This will have been parsed using the outer parameter set and the
* parameter value needs to be preserved for use when the cursor is
* executed.
*/
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
- errmsg("cursor \"%s\" does not exist", stmt->portalname)));
- return; /* keep compiler happy */
+ errmsg("cursor \"%s\" does not exist", stmt->portalname)));
+ return; /* keep compiler happy */
}
/* Adjust dest if needed. MOVE wants destination None */
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_CURSOR),
errmsg("cursor \"%s\" does not exist", name)));
- return; /* keep compiler happy */
+ return; /* keep compiler happy */
}
/*
MemoryContextSwitchTo(PortalContext);
/*
- * Rewind the executor: we need to store the entire result set in the
- * tuplestore, so that subsequent backward FETCHs can be processed.
+ * Rewind the executor: we need to store the entire result set in
+ * the tuplestore, so that subsequent backward FETCHs can be
+ * processed.
*/
ExecutorRewind(queryDesc);
/*
* Now shut down the inner executor.
*/
- portal->queryDesc = NULL; /* prevent double shutdown */
+ portal->queryDesc = NULL; /* prevent double shutdown */
ExecutorEnd(queryDesc);
/*
* Reset the position in the result set: ideally, this could be
- * implemented by just skipping straight to the tuple # that we need
- * to be at, but the tuplestore API doesn't support that. So we start
- * at the beginning of the tuplestore and iterate through it until we
- * reach where we need to be. FIXME someday?
+ * implemented by just skipping straight to the tuple # that we
+ * need to be at, but the tuplestore API doesn't support that. So
+ * we start at the beginning of the tuplestore and iterate through
+ * it until we reach where we need to be. FIXME someday?
*/
MemoryContextSwitchTo(portal->holdContext);
if (portal->posOverflow) /* oops, cannot trust portalPos */
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not reposition held cursor")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not reposition held cursor")));
tuplestore_rescan(portal->holdStore);
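
The comment rewrapped above notes that, lacking a seek operation in the tuplestore API, a held cursor is repositioned by rescanning from the start and advancing tuple by tuple until the saved position is reached, and that the attempt is abandoned if posOverflow made the saved position untrustworthy. A small standalone analogy with an array standing in for the tuplestore; the names here are hypothetical, not the real portal code.

#include <stdio.h>
#include <stdbool.h>

#define NTUPLES 5

static int  store[NTUPLES] = {10, 20, 30, 40, 50};  /* "tuplestore" contents */
static long readpos = 0;                            /* current read position */

static void rescan(void) { readpos = 0; }           /* like tuplestore_rescan */

static bool getnext(int *out)                       /* like fetching one tuple */
{
    if (readpos >= NTUPLES)
        return false;
    *out = store[readpos++];
    return true;
}

/* Reposition to portalPos the hard way: restart and skip forward. */
static bool
reposition(long portalPos, bool posOverflow)
{
    int dummy;

    if (posOverflow)            /* cannot trust the saved position */
        return false;
    rescan();
    while (readpos < portalPos)
        if (!getnext(&dummy))
            return false;       /* ran off the end unexpectedly */
    return true;
}

int main(void)
{
    if (reposition(3, false))
        printf("now positioned before tuple index %ld\n", readpos);
    return 0;
}
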
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.30 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/prepare.c,v 1.31 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nargs = list_length(argtypes);
ParamListInfo paramLI;
List *exprstates;
- ListCell *le, *la;
+ ListCell *le,
+ *la;
int i = 0;
/* Parser should have caught this error, but check for safety */
}
/* Explain each query */
- forboth (q, query_list, p, plan_list)
+ forboth(q, query_list, p, plan_list)
{
Query *query = (Query *) lfirst(q);
Plan *plan = (Plan *) lfirst(p);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.54 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/proclang.c,v 1.55 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type \"language_handler\"",
- NameListToString(stmt->plhandler))));
+ errmsg("function %s must return type \"language_handler\"",
+ NameListToString(stmt->plhandler))));
}
/* validate the validator function */
i = 0;
namestrcpy(&langname, languageName);
- values[i++] = NameGetDatum(&langname); /* lanname */
- values[i++] = BoolGetDatum(true); /* lanispl */
- values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
- values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
- values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
- nulls[i] = 'n'; /* lanacl */
+ values[i++] = NameGetDatum(&langname); /* lanname */
+ values[i++] = BoolGetDatum(true); /* lanispl */
+ values[i++] = BoolGetDatum(stmt->pltrusted); /* lanpltrusted */
+ values[i++] = ObjectIdGetDatum(procOid); /* lanplcallfoid */
+ values[i++] = ObjectIdGetDatum(valProcOid); /* lanvalidator */
+ nulls[i] = 'n'; /* lanacl */
rel = heap_openr(LanguageRelationName, RowExclusiveLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.23 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/schemacmds.c,v 1.24 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errdetail("The prefix \"pg_\" is reserved for system schemas.")));
/*
- * Select default tablespace for schema. If not given, use zero
- * which implies the database's default tablespace.
+ * Select default tablespace for schema. If not given, use zero which
+ * implies the database's default tablespace.
*/
if (stmt->tablespacename)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(stmt->tablespacename);
if (!OidIsValid(tablespaceId))
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
stmt->tablespacename);
- } else {
+ }
+ else
+ {
tablespaceId = InvalidOid;
/* note there is no permission check in this path */
}
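
The hunk above makes the CREATE SCHEMA tablespace choice explicit: a named tablespace is looked up and ACL-checked, while omitting the clause stores InvalidOid, which later resolves to the database's default tablespace with no permission check. A sketch of just that selection; get_tablespace_oid_stub() is a made-up lookup, not the backend function.

#include <stdio.h>
#include <string.h>

typedef unsigned int Oid;
#define InvalidOid ((Oid) 0)

/* hypothetical lookup; the backend consults pg_tablespace instead */
static Oid
get_tablespace_oid_stub(const char *name)
{
    return (strcmp(name, "fastdisk") == 0) ? 16386 : InvalidOid;
}

/* NULL tablespacename means "use the database default", stored as 0. */
static Oid
select_schema_tablespace(const char *tablespacename)
{
    if (tablespacename == NULL)
        return InvalidOid;          /* no permission check in this path */
    return get_tablespace_oid_stub(tablespacename); /* caller ACL-checks */
}

int main(void)
{
    printf("explicit: %u\n", select_schema_tablespace("fastdisk"));
    printf("default:  %u\n", select_schema_tablespace(NULL));
    return 0;
}
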
{
HeapTuple tup;
Relation rel;
- Form_pg_namespace nspForm;
+ Form_pg_namespace nspForm;
rel = heap_openr(NamespaceRelationName, RowExclusiveLock);
tup = SearchSysCache(NAMESPACENAME,
- CStringGetDatum(name),
- 0, 0, 0);
+ CStringGetDatum(name),
+ 0, 0, 0);
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_SCHEMA),
errmsg("schema \"%s\" does not exist", name)));
nspForm = (Form_pg_namespace) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
Datum repl_val[Natts_pg_namespace];
char repl_null[Natts_pg_namespace];
char repl_repl[Natts_pg_namespace];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
heap_freetuple(newtuple);
}
-
+
ReleaseSysCache(tup);
heap_close(rel, NoLock);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.115 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/sequence.c,v 1.116 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
memcpy(seq, &new, sizeof(FormData_pg_sequence));
/* Clear local cache so that we don't think we have cached numbers */
- elm->last = new.last_value; /* last returned number */
+ elm->last = new.last_value; /* last returned number */
elm->cached = new.last_value; /* last cached number (forget
* cached values) */
/* MAXVALUE (null arg means NO MAXVALUE) */
if (max_value != NULL && max_value->arg)
- {
new->max_value = defGetInt64(max_value);
- }
else if (isInit || max_value != NULL)
{
if (new->increment_by > 0)
new->max_value = SEQ_MAXVALUE; /* ascending seq */
else
- new->max_value = -1; /* descending seq */
+ new->max_value = -1; /* descending seq */
}
/* MINVALUE (null arg means NO MINVALUE) */
if (min_value != NULL && min_value->arg)
- {
new->min_value = defGetInt64(min_value);
- }
else if (isInit || min_value != NULL)
{
if (new->increment_by > 0)
- new->min_value = 1; /* ascending seq */
+ new->min_value = 1; /* ascending seq */
else
new->min_value = SEQ_MINVALUE; /* descending seq */
}
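
The MAXVALUE/MINVALUE hunks above encode the defaulting rule for sequences: with a positive increment the maximum defaults to SEQ_MAXVALUE and the minimum to 1, while a negative increment defaults the maximum to -1 and the minimum to SEQ_MINVALUE. A standalone sketch of that rule; the 64-bit integer limits here are stand-ins for the backend's SEQ_MAXVALUE/SEQ_MINVALUE constants.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

/* stand-ins for the backend's SEQ_MAXVALUE / SEQ_MINVALUE */
#define SEQ_MAXVALUE INT64_MAX
#define SEQ_MINVALUE INT64_MIN

static void
default_bounds(int64_t increment_by, int64_t *max_value, int64_t *min_value)
{
    if (increment_by > 0)
    {
        *max_value = SEQ_MAXVALUE;   /* ascending sequence */
        *min_value = 1;
    }
    else
    {
        *max_value = -1;             /* descending sequence */
        *min_value = SEQ_MINVALUE;
    }
}

int main(void)
{
    int64_t max, min;

    default_bounds(1, &max, &min);
    printf("ascending:  min=%" PRId64 " max=%" PRId64 "\n", min, max);
    default_bounds(-1, &max, &min);
    printf("descending: min=%" PRId64 " max=%" PRId64 "\n", min, max);
    return 0;
}
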
buffer = XLogReadBuffer(true, reln, 0);
if (!BufferIsValid(buffer))
elog(PANIC, "seq_redo: can't read block 0 of rel %u/%u/%u",
- xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
+ xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode);
page = (Page) BufferGetPage(buffer);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.128 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablecmds.c,v 1.129 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* entries in the list until commit so that we can roll back if
* needed.
*/
- TransactionId creating_xid;
- TransactionId deleting_xid;
+ TransactionId creating_xid;
+ TransactionId deleting_xid;
} OnCommitItem;
static List *on_commits = NIL;
char relkind; /* Its relkind */
TupleDesc oldDesc; /* Pre-modification tuple descriptor */
/* Information saved by Phase 1 for Phase 2: */
- List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */
+ List *subcmds[AT_NUM_PASSES]; /* Lists of AlterTableCmd */
/* Information saved by Phases 1/2 for Phase 3: */
List *constraints; /* List of NewConstraint */
List *newvals; /* List of NewColumnValue */
/* Objects to rebuild after completing ALTER TYPE operations */
List *changedConstraintOids; /* OIDs of constraints to rebuild */
List *changedConstraintDefs; /* string definitions of same */
- List *changedIndexOids; /* OIDs of indexes to rebuild */
- List *changedIndexDefs; /* string definitions of same */
+ List *changedIndexOids; /* OIDs of indexes to rebuild */
+ List *changedIndexDefs; /* string definitions of same */
} AlteredTableInfo;
/* Struct describing one new constraint to check in Phase 3 scan */
static int transformColumnNameList(Oid relId, List *colList,
int16 *attnums, Oid *atttypids);
static int transformFkeyGetPrimaryKey(Relation pkrel, Oid *indexOid,
- List **attnamelist,
- int16 *attnums, Oid *atttypids,
- Oid *opclasses);
+ List **attnamelist,
+ int16 *attnums, Oid *atttypids,
+ Oid *opclasses);
static Oid transformFkeyCheckAttrs(Relation pkrel,
- int numattrs, int16 *attnums,
- Oid *opclasses);
+ int numattrs, int16 *attnums,
+ Oid *opclasses);
static void validateForeignKeyConstraint(FkConstraint *fkconstraint,
Relation rel, Relation pkrel);
static void createForeignKeyTriggers(Relation rel, FkConstraint *fkconstraint,
static char *fkMatchTypeToString(char match_type);
static void ATController(Relation rel, List *cmds, bool recurse);
static void ATPrepCmd(List **wqueue, Relation rel, AlterTableCmd *cmd,
- bool recurse, bool recursing);
+ bool recurse, bool recursing);
static void ATRewriteCatalogs(List **wqueue);
static void ATExecCmd(AlteredTableInfo *tab, Relation rel, AlterTableCmd *cmd);
static void ATRewriteTables(List **wqueue);
static AlteredTableInfo *ATGetQueueEntry(List **wqueue, Relation rel);
static void ATSimplePermissions(Relation rel, bool allowView);
static void ATSimpleRecursion(List **wqueue, Relation rel,
- AlterTableCmd *cmd, bool recurse);
+ AlterTableCmd *cmd, bool recurse);
static void ATOneLevelRecursion(List **wqueue, Relation rel,
- AlterTableCmd *cmd);
+ AlterTableCmd *cmd);
static void find_composite_type_dependencies(Oid typeOid,
- const char *origTblName);
+ const char *origTblName);
static void ATPrepAddColumn(List **wqueue, Relation rel, bool recurse,
- AlterTableCmd *cmd);
+ AlterTableCmd *cmd);
static void ATExecAddColumn(AlteredTableInfo *tab, Relation rel,
- ColumnDef *colDef);
+ ColumnDef *colDef);
static void add_column_datatype_dependency(Oid relid, int32 attnum, Oid typid);
static void add_column_support_dependency(Oid relid, int32 attnum,
- RangeVar *support);
+ RangeVar *support);
static void ATExecDropNotNull(Relation rel, const char *colName);
static void ATExecSetNotNull(AlteredTableInfo *tab, Relation rel,
- const char *colName);
+ const char *colName);
static void ATExecColumnDefault(Relation rel, const char *colName,
- Node *newDefault);
+ Node *newDefault);
static void ATPrepSetStatistics(Relation rel, const char *colName,
- Node *flagValue);
+ Node *flagValue);
static void ATExecSetStatistics(Relation rel, const char *colName,
- Node *newValue);
+ Node *newValue);
static void ATExecSetStorage(Relation rel, const char *colName,
- Node *newValue);
+ Node *newValue);
static void ATExecDropColumn(Relation rel, const char *colName,
- DropBehavior behavior,
- bool recurse, bool recursing);
+ DropBehavior behavior,
+ bool recurse, bool recursing);
static void ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
- IndexStmt *stmt, bool is_rebuild);
+ IndexStmt *stmt, bool is_rebuild);
static void ATExecAddConstraint(AlteredTableInfo *tab, Relation rel,
- Node *newConstraint);
+ Node *newConstraint);
static void ATAddForeignKeyConstraint(AlteredTableInfo *tab, Relation rel,
- FkConstraint *fkconstraint);
+ FkConstraint *fkconstraint);
static void ATPrepDropConstraint(List **wqueue, Relation rel,
- bool recurse, AlterTableCmd *cmd);
+ bool recurse, AlterTableCmd *cmd);
static void ATExecDropConstraint(Relation rel, const char *constrName,
- DropBehavior behavior, bool quiet);
+ DropBehavior behavior, bool quiet);
static void ATPrepAlterColumnType(List **wqueue,
- AlteredTableInfo *tab, Relation rel,
- bool recurse, bool recursing,
- AlterTableCmd *cmd);
+ AlteredTableInfo *tab, Relation rel,
+ bool recurse, bool recursing,
+ AlterTableCmd *cmd);
static void ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
- const char *colName, TypeName *typename);
+ const char *colName, TypeName *typename);
static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab);
static void ATPostAlterTypeParse(char *cmd, List **wqueue);
static void ATExecChangeOwner(Oid relationOid, int32 newOwnerSysId);
static void ATExecClusterOn(Relation rel, const char *indexName);
static void ATExecDropCluster(Relation rel);
static void ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel,
- char *tablespacename);
+ char *tablespacename);
static void ATExecSetTableSpace(Oid tableOid, Oid newTableSpace);
static void copy_relation_data(Relation rel, SMgrRelation dst);
static int ri_trigger_type(Oid tgfoid);
if (stmt->oncommit != ONCOMMIT_NOOP && !stmt->relation->istemp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("ON COMMIT can only be used on temporary tables")));
+ errmsg("ON COMMIT can only be used on temporary tables")));
/*
* Look up the namespace in which we are supposed to create the
}
/*
- * Select tablespace to use. If not specified, use containing schema's
- * default tablespace (which may in turn default to database's default).
+ * Select tablespace to use. If not specified, use containing
+ * schema's default tablespace (which may in turn default to
+ * database's default).
*/
if (stmt->tablespacename)
{
- AclResult aclresult;
+ AclResult aclresult;
tablespaceId = get_tablespace_oid(stmt->tablespacename);
if (!OidIsValid(tablespaceId))
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, ACL_KIND_TABLESPACE,
stmt->tablespacename);
- } else {
+ }
+ else
+ {
tablespaceId = get_namespace_tablespace(namespaceId);
/* note no permission check on tablespace in this case */
}
*/
schema = MergeAttributes(schema, stmt->inhRelations,
stmt->relation->istemp,
- &inheritOids, &old_constraints, &parentOidCount);
+ &inheritOids, &old_constraints, &parentOidCount);
/*
* Create a relation descriptor from the relation schema and create
if (old_constraints != NIL)
{
ConstrCheck *check = (ConstrCheck *)
- palloc0(list_length(old_constraints) * sizeof(ConstrCheck));
+ palloc0(list_length(old_constraints) * sizeof(ConstrCheck));
int ncheck = 0;
foreach(listptr, old_constraints)
{
Constraint *cdef = (Constraint *) lfirst(listptr);
- bool dup = false;
+ bool dup = false;
if (cdef->contype != CONSTR_CHECK)
continue;
Assert(cdef->name != NULL);
Assert(cdef->raw_expr == NULL && cdef->cooked_expr != NULL);
+
/*
- * In multiple-inheritance situations, it's possible to inherit
- * the same grandparent constraint through multiple parents.
- * Hence, discard inherited constraints that match as to both
- * name and expression. Otherwise, gripe if the names conflict.
+ * In multiple-inheritance situations, it's possible to
+ * inherit the same grandparent constraint through multiple
+ * parents. Hence, discard inherited constraints that match as
+ * to both name and expression. Otherwise, gripe if the names
+ * conflict.
*/
for (i = 0; i < ncheck; i++)
{
RelationGetRelationName(rel))));
/*
- * We can never allow truncation of shared or nailed-in-cache relations,
- * because we can't support changing their relfilenode values.
+ * We can never allow truncation of shared or nailed-in-cache
+ * relations, because we can't support changing their relfilenode
+ * values.
*/
if (rel->rd_rel->relisshared || rel->rd_isnailed)
ereport(ERROR,
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot truncate temporary tables of other sessions")));
+ errmsg("cannot truncate temporary tables of other sessions")));
/*
* Don't allow truncate on tables which are referenced by foreign keys
/*
* Okay, here we go: create a new empty storage file for the relation,
- * and assign it as the relfilenode value. The old storage file is
+ * and assign it as the relfilenode value. The old storage file is
* scheduled for deletion at commit.
*/
setNewRelfilenode(rel);
def->typename->typmod != attribute->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("inherited column \"%s\" has a type conflict",
- attributeName),
+ errmsg("inherited column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
format_type_be(attribute->atttypid))));
* have the same type and typmod.
*/
ereport(NOTICE,
- (errmsg("merging column \"%s\" with inherited definition",
- attributeName)));
+ (errmsg("merging column \"%s\" with inherited definition",
+ attributeName)));
def = (ColumnDef *) list_nth(inhSchema, exist_attno - 1);
if (typenameTypeId(def->typename) != typenameTypeId(newdef->typename) ||
def->typename->typmod != newdef->typename->typmod)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("column \"%s\" has a type conflict",
- attributeName),
+ errmsg("column \"%s\" has a type conflict",
+ attributeName),
errdetail("%s versus %s",
TypeNameToString(def->typename),
TypeNameToString(newdef->typename))));
/*
* Store INHERITS information in pg_inherits using direct ancestors
- * only. Also enter dependencies on the direct ancestors, and make sure
- * they are marked with relhassubclass = true.
+ * only. Also enter dependencies on the direct ancestors, and make
+ * sure they are marked with relhassubclass = true.
*
* (Once upon a time, both direct and indirect ancestors were found here
- * and then entered into pg_ipl. Since that catalog doesn't exist anymore,
- * there's no need to look for indirect ancestors.)
+ * and then entered into pg_ipl. Since that catalog doesn't exist
+ * anymore, there's no need to look for indirect ancestors.)
*/
relation = heap_openr(InheritsRelationName, RowExclusiveLock);
desc = RelationGetDescr(relation);
parentobject;
datum[0] = ObjectIdGetDatum(relationId); /* inhrel */
- datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
+ datum[1] = ObjectIdGetDatum(parentOid); /* inhparent */
datum[2] = Int16GetDatum(seqNumber); /* inhseqno */
nullarr[0] = ' ';
/*
* Fetch a modifiable copy of the tuple, modify it, update pg_class.
*
- * If the tuple already has the right relhassubclass setting, we
- * don't need to update it, but we still need to issue an SI inval
- * message.
+ * If the tuple already has the right relhassubclass setting, we don't
+ * need to update it, but we still need to issue an SI inval message.
*/
relationRelation = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" already exists",
+ errmsg("column \"%s\" of relation \"%s\" already exists",
newattname, RelationGetRelationName(targetrelation))));
namestrcpy(&(attform->attname), newattname);
* rebuild relcache entries. (Ideally this should happen
* automatically...)
*
- * We can skip this for triggers on relid itself, since that
- * relcache flush will happen anyway due to the table or column
- * rename. We just need to catch the far ends of RI relationships.
+ * We can skip this for triggers on relid itself, since that relcache
+ * flush will happen anyway due to the table or column rename. We
+ * just need to catch the far ends of RI relationships.
*/
pg_trigger = (Form_pg_trigger) GETSTRUCT(tuple);
if (pg_trigger->tgrelid != relid)
* 3. Scan table(s) to check new constraints, and optionally recopy
* the data into new table(s).
* Phase 3 is not performed unless one or more of the subcommands requires
- * it. The intention of this design is to allow multiple independent
+ * it. The intention of this design is to allow multiple independent
* updates of the table schema to be performed with only one pass over the
* data.
*
- * ATPrepCmd performs phase 1. A "work queue" entry is created for
+ * ATPrepCmd performs phase 1. A "work queue" entry is created for
* each table to be affected (there may be multiple affected tables if the
* commands traverse a table inheritance hierarchy). Also we do preliminary
* validation of the subcommands, including parse transformation of those
* phases 2 and 3 do no explicit recursion, since phase 1 already did it).
* Certain subcommands need to be performed before others to avoid
* unnecessary conflicts; for example, DROP COLUMN should come before
- * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
+ * ADD COLUMN. Therefore phase 1 divides the subcommands into multiple
* lists, one for each logical "pass" of phase 2.
*
* ATRewriteTables performs phase 3 for those tables that need it.
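
The comment block above describes the shape of the ALTER TABLE machinery: phase 1 (ATPrepCmd) files each subcommand into one of several per-table "pass" lists, phase 2 (ATRewriteCatalogs) walks the passes in a fixed order across all affected tables, and phase 3 rewrites or rescans the data only if some subcommand required it. The fragment below is a minimal standalone model of that queueing structure, not the backend code; the pass names, the work-queue struct, and the choice of which command forces phase 3 are simplified stand-ins.

#include <stdio.h>

enum pass { PASS_DROP, PASS_ALTER_TYPE, PASS_ADD_COL, PASS_MISC, NUM_PASSES };
#define MAX_CMDS 8

/* one work-queue entry per affected table, holding per-pass command lists */
struct work_entry
{
    const char *relname;
    const char *subcmds[NUM_PASSES][MAX_CMDS];
    int         ncmds[NUM_PASSES];
    int         needs_phase3;
};

/* Phase 1: validate (omitted here) and file the subcommand under its pass. */
static void
prep_cmd(struct work_entry *tab, enum pass p, const char *cmd, int rewrites)
{
    tab->subcmds[p][tab->ncmds[p]++] = cmd;
    if (rewrites)
        tab->needs_phase3 = 1;
}

/* Phase 2: all tables "in parallel", one pass at a time. */
static void
rewrite_catalogs(struct work_entry *queue, int ntabs)
{
    int p, t, c;

    for (p = 0; p < NUM_PASSES; p++)
        for (t = 0; t < ntabs; t++)
            for (c = 0; c < queue[t].ncmds[p]; c++)
                printf("pass %d: %s: %s\n", p, queue[t].relname,
                       queue[t].subcmds[p][c]);
}

int main(void)
{
    struct work_entry queue[1] = {{ .relname = "foo" }};

    prep_cmd(&queue[0], PASS_DROP, "DROP COLUMN b", 0);
    prep_cmd(&queue[0], PASS_ADD_COL, "ADD COLUMN c", 1);
    rewrite_catalogs(queue, 1);

    /* Phase 3 runs only for tables some subcommand flagged. */
    if (queue[0].needs_phase3)
        printf("phase 3: rewrite/scan %s\n", queue[0].relname);
    return 0;
}
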
cmd = copyObject(cmd);
/*
- * Do permissions checking, recursion to child tables if needed,
- * and any additional phase-1 processing needed.
+ * Do permissions checking, recursion to child tables if needed, and
+ * any additional phase-1 processing needed.
*/
switch (cmd->subtype)
{
pass = AT_PASS_ADD_COL;
break;
case AT_ColumnDefault: /* ALTER COLUMN DEFAULT */
+
/*
- * We allow defaults on views so that INSERT into a view can have
- * default-ish behavior. This works because the rewriter
+ * We allow defaults on views so that INSERT into a view can
+ * have default-ish behavior. This works because the rewriter
* substitutes default values into INSERTs before it expands
* rules.
*/
break;
case AT_AddConstraint: /* ADD CONSTRAINT */
ATSimplePermissions(rel, false);
+
/*
* Currently we recurse only for CHECK constraints, never for
* foreign-key constraints. UNIQUE/PKEY constraints won't be
/* No command-specific prep needed */
pass = AT_PASS_DROP;
break;
- case AT_AlterColumnType: /* ALTER COLUMN TYPE */
+ case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATSimplePermissions(rel, false);
/* Performs own recursion */
ATPrepAlterColumnType(wqueue, tab, rel, recurse, recursing, cmd);
pass = AT_PASS_ALTER_TYPE;
break;
- case AT_ToastTable: /* CREATE TOAST TABLE */
+ case AT_ToastTable: /* CREATE TOAST TABLE */
ATSimplePermissions(rel, false);
/* This command never recurses */
/* No command-specific prep needed */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_ClusterOn: /* CLUSTER ON */
+ case AT_ClusterOn: /* CLUSTER ON */
case AT_DropCluster: /* SET WITHOUT CLUSTER */
ATSimplePermissions(rel, false);
/* These commands never recurse */
/* No command-specific prep needed */
pass = AT_PASS_MISC;
break;
- case AT_DropOids: /* SET WITHOUT OIDS */
+ case AT_DropOids: /* SET WITHOUT OIDS */
ATSimplePermissions(rel, false);
/* Performs own recursion */
if (rel->rd_rel->relhasoids)
case AT_SetTableSpace: /* SET TABLESPACE */
/* This command never recurses */
ATPrepSetTableSpace(tab, rel, cmd->name);
- pass = AT_PASS_MISC; /* doesn't actually matter */
+ pass = AT_PASS_MISC; /* doesn't actually matter */
break;
- default: /* oops */
+ default: /* oops */
elog(ERROR, "unrecognized alter table type: %d",
(int) cmd->subtype);
pass = 0; /* keep compiler quiet */
/*
* ATRewriteCatalogs
*
- * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
+ * Traffic cop for ALTER TABLE Phase 2 operations. Subcommands are
* dispatched in a "safe" execution order (designed to avoid unnecessary
* conflicts).
*/
/*
* We process all the tables "in parallel", one pass at a time. This
- * is needed because we may have to propagate work from one table
- * to another (specifically, ALTER TYPE on a foreign key's PK has to
+ * is needed because we may have to propagate work from one table to
+ * another (specifically, ALTER TYPE on a foreign key's PK has to
* dispatch the re-adding of the foreign key constraint to the other
- * table). Work can only be propagated into later passes, however.
+ * table). Work can only be propagated into later passes, however.
*/
for (pass = 0; pass < AT_NUM_PASSES; pass++)
{
if (subcmds == NIL)
continue;
- /* Exclusive lock was obtained by phase 1, needn't get it again */
+ /*
+ * Exclusive lock was obtained by phase 1, needn't get it
+ * again
+ */
rel = relation_open(tab->relid, NoLock);
foreach(lcmd, subcmds)
- {
ATExecCmd(tab, rel, (AlterTableCmd *) lfirst(lcmd));
- }
/*
- * After the ALTER TYPE pass, do cleanup work (this is not done in
- * ATExecAlterColumnType since it should be done only once if
- * multiple columns of a table are altered).
+ * After the ALTER TYPE pass, do cleanup work (this is not
+ * done in ATExecAlterColumnType since it should be done only
+ * once if multiple columns of a table are altered).
*/
if (pass == AT_PASS_ALTER_TYPE)
ATPostAlterTypeCleanup(wqueue, tab);
(tab->subcmds[AT_PASS_ADD_COL] ||
tab->subcmds[AT_PASS_ALTER_TYPE] ||
tab->subcmds[AT_PASS_COL_ATTRS]))
- {
AlterTableCreateToastTable(tab->relid, true);
- }
}
}
case AT_DropColumn: /* DROP COLUMN */
ATExecDropColumn(rel, cmd->name, cmd->behavior, false, false);
break;
- case AT_DropColumnRecurse: /* DROP COLUMN with recursion */
+ case AT_DropColumnRecurse: /* DROP COLUMN with recursion */
ATExecDropColumn(rel, cmd->name, cmd->behavior, true, false);
break;
case AT_AddIndex: /* ADD INDEX */
case AT_DropConstraintQuietly: /* DROP CONSTRAINT for child */
ATExecDropConstraint(rel, cmd->name, cmd->behavior, true);
break;
- case AT_AlterColumnType: /* ALTER COLUMN TYPE */
+ case AT_AlterColumnType: /* ALTER COLUMN TYPE */
ATExecAlterColumnType(tab, rel, cmd->name, (TypeName *) cmd->def);
break;
case AT_ToastTable: /* CREATE TOAST TABLE */
case AT_ClusterOn: /* CLUSTER ON */
ATExecClusterOn(rel, cmd->name);
break;
- case AT_DropCluster: /* SET WITHOUT CLUSTER */
+ case AT_DropCluster: /* SET WITHOUT CLUSTER */
ATExecDropCluster(rel);
break;
case AT_DropOids: /* SET WITHOUT OIDS */
+
/*
- * Nothing to do here; we'll have generated a DropColumn subcommand
- * to do the real work
+ * Nothing to do here; we'll have generated a DropColumn
+ * subcommand to do the real work
*/
break;
- case AT_SetTableSpace: /* SET TABLESPACE */
+ case AT_SetTableSpace: /* SET TABLESPACE */
+
/*
* Nothing to do here; Phase 3 does the work
*/
break;
- default: /* oops */
+ default: /* oops */
elog(ERROR, "unrecognized alter table type: %d",
(int) cmd->subtype);
break;
}
/*
- * Bump the command counter to ensure the next subcommand in the sequence
- * can see the changes so far
+ * Bump the command counter to ensure the next subcommand in the
+ * sequence can see the changes so far
*/
CommandCounterIncrement();
}
char NewHeapName[NAMEDATALEN];
Oid NewTableSpace;
Relation OldHeap;
- ObjectAddress object;
+ ObjectAddress object;
OldHeap = heap_open(tab->relid, NoLock);
/*
* We can never allow rewriting of shared or nailed-in-cache
- * relations, because we can't support changing their relfilenode
- * values.
+ * relations, because we can't support changing their
+ * relfilenode values.
*/
if (OldHeap->rd_rel->relisshared || OldHeap->rd_isnailed)
ereport(ERROR,
RelationGetRelationName(OldHeap))));
/*
- * Don't allow rewrite on temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow rewrite on temp tables of other backends ...
+ * their local buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(OldHeap)))
ereport(ERROR,
/*
* Create the new heap, using a temporary name in the same
- * namespace as the existing table. NOTE: there is some risk of
- * collision with user relnames. Working around this seems more
- * trouble than it's worth; in particular, we can't create the new
- * heap in a different namespace from the old, or we will have
- * problems with the TEMP status of temp tables.
+ * namespace as the existing table. NOTE: there is some risk
+ * of collision with user relnames. Working around this seems
+ * more trouble than it's worth; in particular, we can't
+ * create the new heap in a different namespace from the old,
+ * or we will have problems with the TEMP status of temp
+ * tables.
*/
snprintf(NewHeapName, sizeof(NewHeapName),
"pg_temp_%u", tab->relid);
object.objectSubId = 0;
/*
- * The new relation is local to our transaction and we know nothing
- * depends on it, so DROP_RESTRICT should be OK.
+ * The new relation is local to our transaction and we know
+ * nothing depends on it, so DROP_RESTRICT should be OK.
*/
performDeletion(&object, DROP_RESTRICT);
/* performDeletion does CommandCounterIncrement at end */
/*
- * Rebuild each index on the relation (but not the toast table,
- * which is all-new anyway). We do not need
+ * Rebuild each index on the relation (but not the toast
+ * table, which is all-new anyway). We do not need
* CommandCounterIncrement() because reindex_relation does it.
*/
reindex_relation(tab->relid, false);
else
{
/*
- * Test the current data within the table against new constraints
- * generated by ALTER TABLE commands, but don't rebuild data.
+ * Test the current data within the table against new
+ * constraints generated by ALTER TABLE commands, but don't
+ * rebuild data.
*/
if (tab->constraints != NIL)
ATRewriteTable(tab, InvalidOid);
+
/*
- * If we had SET TABLESPACE but no reason to reconstruct tuples,
- * just do a block-by-block copy.
+ * If we had SET TABLESPACE but no reason to reconstruct
+ * tuples, just do a block-by-block copy.
*/
if (tab->newTableSpace)
ATExecSetTableSpace(tab->relid, tab->newTableSpace);
}
/*
- * Foreign key constraints are checked in a final pass, since
- * (a) it's generally best to examine each one separately, and
- * (b) it's at least theoretically possible that we have changed
- * both relations of the foreign key, and we'd better have finished
- * both rewrites before we try to read the tables.
+ * Foreign key constraints are checked in a final pass, since (a) it's
+ * generally best to examine each one separately, and (b) it's at
+ * least theoretically possible that we have changed both relations of
+ * the foreign key, and we'd better have finished both rewrites before
+ * we try to read the tables.
*/
foreach(ltab, *wqueue)
{
- AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
- Relation rel = NULL;
- ListCell *lcon;
+ AlteredTableInfo *tab = (AlteredTableInfo *) lfirst(ltab);
+ Relation rel = NULL;
+ ListCell *lcon;
foreach(lcon, tab->constraints)
{
*/
oldrel = heap_open(tab->relid, NoLock);
oldTupDesc = tab->oldDesc;
- newTupDesc = RelationGetDescr(oldrel); /* includes all mods */
+ newTupDesc = RelationGetDescr(oldrel); /* includes all mods */
if (OidIsValid(OIDNewHeap))
newrel = heap_open(OIDNewHeap, AccessExclusiveLock);
* If we need to rewrite the table, the operation has to be propagated
* to tables that use this table's rowtype as a column type.
*
- * (Eventually this will probably become true for scans as well, but
- * at the moment a composite type does not enforce any constraints,
- * so it's not necessary/appropriate to enforce them just during ALTER.)
+ * (Eventually this will probably become true for scans as well, but at
+ * the moment a composite type does not enforce any constraints, so
+ * it's not necessary/appropriate to enforce them just during ALTER.)
*/
if (newrel)
find_composite_type_dependencies(oldrel->rd_rel->reltype,
foreach(l, tab->newvals)
{
- NewColumnValue *ex = lfirst(l);
+ NewColumnValue *ex = lfirst(l);
needscan = true;
if (needscan)
{
- ExprContext *econtext;
+ ExprContext *econtext;
Datum *values;
char *nulls;
TupleTableSlot *oldslot;
TupleTableSlot *newslot;
- HeapScanDesc scan;
+ HeapScanDesc scan;
HeapTuple tuple;
econtext = GetPerTupleExprContext(estate);
* Extract data from old tuple. We can force to null any
* columns that are deleted according to the new tuple.
*/
- int natts = newTupDesc->natts;
+ int natts = newTupDesc->natts;
heap_deformtuple(tuple, oldTupDesc, values, nulls);
}
/*
- * Process supplied expressions to replace selected columns.
- * Expression inputs come from the old tuple.
+ * Process supplied expressions to replace selected
+ * columns. Expression inputs come from the old tuple.
*/
ExecStoreTuple(tuple, oldslot, InvalidBuffer, false);
econtext->ecxt_scantuple = oldslot;
foreach(l, tab->newvals)
{
- NewColumnValue *ex = lfirst(l);
- bool isNull;
+ NewColumnValue *ex = lfirst(l);
+ bool isNull;
values[ex->attnum - 1] = ExecEvalExpr(ex->exprstate,
econtext,
con->name)));
break;
case CONSTR_NOTNULL:
- {
- Datum d;
- bool isnull;
+ {
+ Datum d;
+ bool isnull;
- d = heap_getattr(tuple, con->attnum, newTupDesc,
- &isnull);
- if (isnull)
- ereport(ERROR,
+ d = heap_getattr(tuple, con->attnum, newTupDesc,
+ &isnull);
+ if (isnull)
+ ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" contains null values",
get_attname(tab->relid,
con->attnum))));
- }
- break;
+ }
+ break;
case CONSTR_FOREIGN:
/* Nothing to do here */
break;
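
The CONSTR_NOTNULL arm re-indented above is the heart of the phase-3 scan for SET NOT NULL: for every existing tuple, fetch the attribute through the new descriptor and raise an error if it is null. A self-contained analogy using a plain array of nullable values; the column name and report format are invented for the example.

#include <stdio.h>
#include <stdbool.h>

struct row
{
    int  val;
    bool isnull;     /* stands in for heap_getattr()'s isnull output */
};

/* Scan existing rows and fail if the to-be-NOT-NULL column has a null. */
static bool
check_not_null(const struct row *rows, int nrows, const char *colname)
{
    int i;

    for (i = 0; i < nrows; i++)
        if (rows[i].isnull)
        {
            fprintf(stderr, "column \"%s\" contains null values\n", colname);
            return false;
        }
    return true;
}

int main(void)
{
    struct row data[] = {{1, false}, {0, true}, {3, false}};

    if (!check_not_null(data, 3, "f1"))
        return 1;       /* the backend would ereport(ERROR) here instead */
    return 0;
}
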
else if (OidIsValid(rel->rd_rel->reltype))
{
/*
- * A view or composite type itself isn't a problem, but we must
- * recursively check for indirect dependencies via its rowtype.
+ * A view or composite type itself isn't a problem, but we
+ * must recursively check for indirect dependencies via its
+ * rowtype.
*/
find_composite_type_dependencies(rel->rd_rel->reltype,
origTblName);
if (find_inheritance_children(RelationGetRelid(rel)) != NIL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("column must be added to child tables too")));
+ errmsg("column must be added to child tables too")));
}
}
attrdesc = heap_openr(AttributeRelationName, RowExclusiveLock);
/*
- * Are we adding the column to a recursion child? If so, check whether
- * to merge with an existing definition for the column.
+ * Are we adding the column to a recursion child? If so, check
+ * whether to merge with an existing definition for the column.
*/
if (colDef->inhcount > 0)
{
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("child table \"%s\" has different type for column \"%s\"",
- RelationGetRelationName(rel), colDef->colname)));
+ RelationGetRelationName(rel), colDef->colname)));
/* Bump the existing child att's inhcount */
childatt->attinhcount++;
/* Inform the user about the merge */
ereport(NOTICE,
(errmsg("merging definition of column \"%s\" for child \"%s\"",
- colDef->colname, RelationGetRelationName(rel))));
+ colDef->colname, RelationGetRelationName(rel))));
heap_close(attrdesc, RowExclusiveLock);
return;
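
The hunk above handles ADD COLUMN arriving at an inheritance child: if the child already has a column of that name, the definitions must agree on type, the existing attribute's attinhcount is bumped, and a NOTICE reports the merge instead of adding a duplicate column. A small sketch of that merge decision with made-up structures; a real merge also compares typmod and updates the catalog copy.

#include <stdio.h>

struct attr
{
    char name[64];
    int  typid;
    int  inhcount;   /* how many parents supplied this column */
};

/* Returns 1 if no match (caller adds a new column), 0 on merge, -1 on conflict. */
static int
merge_inherited_column(struct attr *existing, const char *name, int typid)
{
    if (existing == NULL)
        return 1;
    if (existing->typid != typid)
    {
        fprintf(stderr, "child has different type for column \"%s\"\n", name);
        return -1;                       /* the backend errors out here */
    }
    existing->inhcount++;                /* merge with the inherited definition */
    printf("merging definition of column \"%s\" for child\n", name);
    return 0;
}

int main(void)
{
    struct attr child = {"f1", 23 /* int4-ish */, 1};

    merge_inherited_column(&child, "f1", 23);   /* merged, inhcount now 2 */
    merge_inherited_column(&child, "f1", 25);   /* type conflict reported */
    printf("inhcount=%d\n", child.inhcount);
    return 0;
}
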
0, 0))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" already exists",
- colDef->colname, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" already exists",
+ colDef->colname, RelationGetRelationName(rel))));
minattnum = ((Form_pg_class) GETSTRUCT(reltup))->relnatts;
maxatts = minattnum + 1;
/*
* Tell Phase 3 to fill in the default expression, if there is one.
*
- * If there is no default, Phase 3 doesn't have to do anything,
- * because that effectively means that the default is NULL. The
- * heap tuple access routines always check for attnum > # of attributes
- * in tuple, and return NULL if so, so without any modification of
- * the tuple data we will get the effect of NULL values in the new
- * column.
+ * If there is no default, Phase 3 doesn't have to do anything, because
+ * that effectively means that the default is NULL. The heap tuple
+ * access routines always check for attnum > # of attributes in tuple,
+ * and return NULL if so, so without any modification of the tuple
+ * data we will get the effect of NULL values in the new column.
*
* Note: we use build_column_default, and not just the cooked default
- * returned by AddRelationRawConstraints, so that the right thing happens
- * when a datatype's default applies.
+ * returned by AddRelationRawConstraints, so that the right thing
+ * happens when a datatype's default applies.
*/
defval = (Expr *) build_column_default(rel, attribute->attnum);
if (defval)
{
- NewColumnValue *newval;
+ NewColumnValue *newval;
newval = (NewColumnValue *) palloc0(sizeof(NewColumnValue));
newval->attnum = attribute->attnum;
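
The reflowed comment above relies on a property of the heap tuple access routines: asking for an attribute number beyond the number of attributes physically stored in a tuple yields NULL, so a newly added column without a default needs no data rewrite at all, and only a defaulted column queues a NewColumnValue for phase 3. A toy illustration of that convention; the tuple layout here is invented, not the on-disk format.

#include <stdio.h>
#include <stdbool.h>

struct toy_tuple
{
    int natts;          /* attributes physically present in this tuple */
    int values[4];
};

/* Like the heap access routines: attnum beyond natts reads as NULL. */
static bool
toy_getattr(const struct toy_tuple *tup, int attnum, int *out)
{
    if (attnum > tup->natts)
        return false;               /* "isnull" */
    *out = tup->values[attnum - 1];
    return true;
}

int main(void)
{
    /* old tuple written back when the table had only 2 columns */
    struct toy_tuple old = {2, {7, 8, 0, 0}};
    int v;

    if (!toy_getattr(&old, 3, &v))  /* column 3 added later, no default */
        printf("column 3 reads as NULL without rewriting the tuple\n");
    return 0;
}
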
if (indexStruct->indkey[i] == attnum)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("column \"%s\" is in a primary key",
- colName)));
+ errmsg("column \"%s\" is in a primary key",
+ colName)));
}
}
/*
* Okay, actually perform the catalog change ... if needed
*/
- if (! ((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
+ if (!((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull)
{
((Form_pg_attribute) GETSTRUCT(tuple))->attnotnull = TRUE;
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
/* Prevent them from altering a system attribute */
if (attnum <= 0)
ATPrepSetStatistics(Relation rel, const char *colName, Node *flagValue)
{
/*
- * We do our own permission checking because (a) we want to allow
- * SET STATISTICS on indexes (for expressional index columns), and
- * (b) we want to allow SET STATISTICS on system catalogs without
- * requiring allowSystemTableMods to be turned on.
+ * We do our own permission checking because (a) we want to allow SET
+ * STATISTICS on indexes (for expressional index columns), and (b) we
+ * want to allow SET STATISTICS on system catalogs without requiring
+ * allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
rel->rd_rel->relkind != RELKIND_INDEX)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum <= 0)
if (!HeapTupleIsValid(tuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attrtuple = (Form_pg_attribute) GETSTRUCT(tuple);
if (attrtuple->attnum <= 0)
*
* DROP COLUMN cannot use the normal ALTER TABLE recursion mechanism,
* because we have to decide at runtime whether to recurse or not depending
- * on whether attinhcount goes to zero or not. (We can't check this in a
+ * on whether attinhcount goes to zero or not. (We can't check this in a
* static pre-pass because it won't handle multiple inheritance situations
- * correctly.) Since DROP COLUMN doesn't need to create any work queue
+ * correctly.) Since DROP COLUMN doesn't need to create any work queue
* entries for Phase 3, it's okay to recurse internally in this routine
* without considering the work queue.
*/
{
/*
* If the child column has other definition sources, just
- * decrement its inheritance count; if not, recurse to delete
- * it.
+ * decrement its inheritance count; if not, recurse to
+ * delete it.
*/
if (childatt->attinhcount == 1 && !childatt->attislocal)
{
else
{
/*
- * If we were told to drop ONLY in this table (no recursion),
- * we need to mark the inheritors' attribute as locally
- * defined rather than inherited.
+ * If we were told to drop ONLY in this table (no
+ * recursion), we need to mark the inheritors' attribute
+ * as locally defined rather than inherited.
*/
childatt->attinhcount--;
childatt->attislocal = true;
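
The re-wrapped comments above describe the runtime decision DROP COLUMN makes for each inheritance child: when recursing, a child whose attinhcount would drop to zero and which is not locally defined is dropped too, otherwise only the count is decremented; in the ONLY (non-recursive) case the count is decremented and the column is additionally marked as locally defined. A compact sketch of that branch with hypothetical field and function names:

#include <stdio.h>
#include <stdbool.h>

struct child_att
{
    int  attinhcount;
    bool attislocal;
};

static void drop_in_child(void) { printf("recursing: drop column in child\n"); }

static void
handle_child_on_drop(struct child_att *att, bool recurse)
{
    if (recurse)
    {
        /* Sole definition came from the parent being altered: drop it too. */
        if (att->attinhcount == 1 && !att->attislocal)
            drop_in_child();
        else
            att->attinhcount--;          /* still defined from elsewhere */
    }
    else
    {
        /* ONLY requested: keep the column but make it locally owned. */
        att->attinhcount--;
        att->attislocal = true;
    }
}

int main(void)
{
    struct child_att a = {1, false};
    struct child_att b = {2, false};

    handle_child_on_drop(&a, true);      /* recurses into the child */
    handle_child_on_drop(&b, false);     /* becomes local, inhcount 1 */
    printf("b: inhcount=%d local=%d\n", b.attinhcount, b.attislocal);
    return 0;
}
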
class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCacheCopy(RELOID,
- ObjectIdGetDatum(RelationGetRelid(rel)),
+ ObjectIdGetDatum(RelationGetRelid(rel)),
0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u",
ATExecAddIndex(AlteredTableInfo *tab, Relation rel,
IndexStmt *stmt, bool is_rebuild)
{
- bool check_rights;
- bool skip_build;
- bool quiet;
+ bool check_rights;
+ bool skip_build;
+ bool quiet;
Assert(IsA(stmt, IndexStmt));
/* suppress notices when rebuilding existing index */
quiet = is_rebuild;
- DefineIndex(stmt->relation, /* relation */
- stmt->idxname, /* index name */
- stmt->accessMethod, /* am name */
+ DefineIndex(stmt->relation, /* relation */
+ stmt->idxname, /* index name */
+ stmt->accessMethod, /* am name */
stmt->tableSpace,
- stmt->indexParams, /* parameters */
+ stmt->indexParams, /* parameters */
(Expr *) stmt->whereClause,
stmt->rangetable,
stmt->unique,
stmt->primary,
stmt->isconstraint,
- true, /* is_alter_table */
+ true, /* is_alter_table */
check_rights,
skip_build,
quiet);
switch (nodeTag(newConstraint))
{
case T_Constraint:
- {
- Constraint *constr = (Constraint *) newConstraint;
-
- /*
- * Currently, we only expect to see CONSTR_CHECK nodes
- * arriving here (see the preprocessing done in
- * parser/analyze.c). Use a switch anyway to make it
- * easier to add more code later.
- */
- switch (constr->contype)
{
- case CONSTR_CHECK:
- {
- List *newcons;
- ListCell *lcon;
+ Constraint *constr = (Constraint *) newConstraint;
- /*
- * Call AddRelationRawConstraints to do the work.
- * It returns a list of cooked constraints.
- */
- newcons = AddRelationRawConstraints(rel, NIL,
- list_make1(constr));
- /* Add each constraint to Phase 3's queue */
- foreach(lcon, newcons)
- {
- CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon);
- NewConstraint *newcon;
-
- newcon = (NewConstraint *) palloc0(sizeof(NewConstraint));
- newcon->name = ccon->name;
- newcon->contype = ccon->contype;
- newcon->attnum = ccon->attnum;
- /* ExecQual wants implicit-AND format */
- newcon->qual = (Node *)
- make_ands_implicit((Expr *) ccon->expr);
-
- tab->constraints = lappend(tab->constraints,
- newcon);
- }
- break;
+ /*
+ * Currently, we only expect to see CONSTR_CHECK nodes
+ * arriving here (see the preprocessing done in
+ * parser/analyze.c). Use a switch anyway to make it
+ * easier to add more code later.
+ */
+ switch (constr->contype)
+ {
+ case CONSTR_CHECK:
+ {
+ List *newcons;
+ ListCell *lcon;
+
+ /*
+ * Call AddRelationRawConstraints to do the
+ * work. It returns a list of cooked
+ * constraints.
+ */
+ newcons = AddRelationRawConstraints(rel, NIL,
+ list_make1(constr));
+ /* Add each constraint to Phase 3's queue */
+ foreach(lcon, newcons)
+ {
+ CookedConstraint *ccon = (CookedConstraint *) lfirst(lcon);
+ NewConstraint *newcon;
+
+ newcon = (NewConstraint *) palloc0(sizeof(NewConstraint));
+ newcon->name = ccon->name;
+ newcon->contype = ccon->contype;
+ newcon->attnum = ccon->attnum;
+ /* ExecQual wants implicit-AND format */
+ newcon->qual = (Node *)
+ make_ands_implicit((Expr *) ccon->expr);
+
+ tab->constraints = lappend(tab->constraints,
+ newcon);
+ }
+ break;
+ }
+ default:
+ elog(ERROR, "unrecognized constraint type: %d",
+ (int) constr->contype);
}
- default:
- elog(ERROR, "unrecognized constraint type: %d",
- (int) constr->contype);
+ break;
}
- break;
- }
case T_FkConstraint:
- {
- FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
-
- /*
- * Assign or validate constraint name
- */
- if (fkconstraint->constr_name)
{
- if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
- RelationGetRelid(rel),
- RelationGetNamespace(rel),
- fkconstraint->constr_name))
- ereport(ERROR,
- (errcode(ERRCODE_DUPLICATE_OBJECT),
- errmsg("constraint \"%s\" for relation \"%s\" already exists",
- fkconstraint->constr_name,
- RelationGetRelationName(rel))));
- }
- else
- fkconstraint->constr_name =
- ChooseConstraintName(RelationGetRelationName(rel),
- strVal(linitial(fkconstraint->fk_attrs)),
- "fkey",
- RelationGetNamespace(rel),
- NIL);
+ FkConstraint *fkconstraint = (FkConstraint *) newConstraint;
- ATAddForeignKeyConstraint(tab, rel, fkconstraint);
+ /*
+ * Assign or validate constraint name
+ */
+ if (fkconstraint->constr_name)
+ {
+ if (ConstraintNameIsUsed(CONSTRAINT_RELATION,
+ RelationGetRelid(rel),
+ RelationGetNamespace(rel),
+ fkconstraint->constr_name))
+ ereport(ERROR,
+ (errcode(ERRCODE_DUPLICATE_OBJECT),
+ errmsg("constraint \"%s\" for relation \"%s\" already exists",
+ fkconstraint->constr_name,
+ RelationGetRelationName(rel))));
+ }
+ else
+ fkconstraint->constr_name =
+ ChooseConstraintName(RelationGetRelationName(rel),
+ strVal(linitial(fkconstraint->fk_attrs)),
+ "fkey",
+ RelationGetNamespace(rel),
+ NIL);
- break;
- }
+ ATAddForeignKeyConstraint(tab, rel, fkconstraint);
+
+ break;
+ }
default:
elog(ERROR, "unrecognized node type: %d",
(int) nodeTag(newConstraint));
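
The re-nested CONSTR_CHECK branch above cooks each raw CHECK constraint via AddRelationRawConstraints and then queues one entry per cooked constraint for the phase-3 scan, converting the expression to implicit-AND form along the way. Below is a reduced model of that "cook, then queue for later validation" step; the structures and the cook() helper are placeholders, not the backend API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct new_constraint
{
    char                   name[64];
    char                   qual[128];   /* stand-in for the implicit-AND qual */
    struct new_constraint *next;
};

/* placeholder for the real cooking step; here it only invents a name */
static const char *
cook(const char *raw_expr, char *namebuf, size_t len)
{
    snprintf(namebuf, len, "%s_check", "foo");
    return raw_expr;
}

/* Queue a cooked constraint so "phase 3" can verify existing rows later. */
static struct new_constraint *
queue_check_constraint(struct new_constraint *head, const char *raw_expr)
{
    struct new_constraint *newcon = calloc(1, sizeof(*newcon));

    strncpy(newcon->qual, cook(raw_expr, newcon->name, sizeof(newcon->name)),
            sizeof(newcon->qual) - 1);
    newcon->next = head;
    return newcon;
}

int main(void)
{
    struct new_constraint *queue = NULL, *c;

    queue = queue_check_constraint(queue, "a > 0");
    for (c = queue; c != NULL; c = c->next)
        printf("phase 3 will check %s: %s\n", c->name, c->qual);
    return 0;
}
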
RelationGetRelationName(rel));
/*
- * Disallow reference from permanent table to temp table or vice versa.
- * (The ban on perm->temp is for fairly obvious reasons. The ban on
- * temp->perm is because other backends might need to run the RI triggers
- * on the perm table, but they can't reliably see tuples the owning
- * backend has created in the temp table, because non-shared buffers
- * are used for temp tables.)
+ * Disallow reference from permanent table to temp table or vice
+ * versa. (The ban on perm->temp is for fairly obvious reasons. The
+ * ban on temp->perm is because other backends might need to run the
+ * RI triggers on the perm table, but they can't reliably see tuples
+ * the owning backend has created in the temp table, because
+ * non-shared buffers are used for temp tables.)
*/
if (isTempNamespace(RelationGetNamespace(pkrel)))
{
* fktypoid[i] is the foreign key table's i'th key's type
*
* Note that we look for an operator with the PK type on the left;
- * when the types are different this is critical because the PK index
- * will need operators with the indexkey on the left. (Ordinarily
- * both commutator operators will exist if either does, but we won't
- * get the right answer from the test below on opclass membership
- * unless we select the proper operator.)
+ * when the types are different this is critical because the PK
+ * index will need operators with the indexkey on the left.
+ * (Ordinarily both commutator operators will exist if either
+ * does, but we won't get the right answer from the test below on
+ * opclass membership unless we select the proper operator.)
*/
Operator o = oper(list_make1(makeString("=")),
pktypoid[i], fktypoid[i], true);
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of incompatible types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
fkconstraint->constr_name),
errdetail("Key columns \"%s\" and \"%s\" "
"are of different types: %s and %s.",
- strVal(list_nth(fkconstraint->fk_attrs, i)),
- strVal(list_nth(fkconstraint->pk_attrs, i)),
+ strVal(list_nth(fkconstraint->fk_attrs, i)),
+ strVal(list_nth(fkconstraint->pk_attrs, i)),
format_type_be(fktypoid[i]),
format_type_be(pktypoid[i]))));
}
/*
- * Tell Phase 3 to check that the constraint is satisfied by existing rows
- * (we can skip this during table creation).
+ * Tell Phase 3 to check that the constraint is satisfied by existing
+ * rows (we can skip this during table creation).
*/
if (!fkconstraint->skip_validation)
{
* transformFkeyGetPrimaryKey -
*
* Look up the names, attnums, and types of the primary key attributes
- * for the pkrel. Also return the index OID and index opclasses of the
+ * for the pkrel. Also return the index OID and index opclasses of the
* index supporting the primary key.
*
- * All parameters except pkrel are output parameters. Also, the function
+ * All parameters except pkrel are output parameters. Also, the function
* return value is the number of attributes in the primary key.
*
* Used when the column list in the REFERENCES specification is omitted.
static Oid
transformFkeyCheckAttrs(Relation pkrel,
int numattrs, int16 *attnums,
- Oid *opclasses) /* output parameter */
+ Oid *opclasses) /* output parameter */
{
Oid indexoid = InvalidOid;
bool found = false;
trig.tginitdeferred = FALSE;
trig.tgargs = (char **) palloc(sizeof(char *) *
- (4 + list_length(fkconstraint->fk_attrs)
- + list_length(fkconstraint->pk_attrs)));
+ (4 + list_length(fkconstraint->fk_attrs)
+ + list_length(fkconstraint->pk_attrs)));
trig.tgargs[0] = trig.tgname;
trig.tgargs[1] = RelationGetRelationName(rel);
/* Otherwise if more than one constraint deleted, notify */
else if (deleted > 1)
ereport(NOTICE,
- (errmsg("multiple constraints named \"%s\" were dropped",
- constrName)));
+ (errmsg("multiple constraints named \"%s\" were dropped",
+ constrName)));
}
}
CheckAttributeType(colName, targettype);
/*
- * Set up an expression to transform the old data value to the new type.
- * If a USING option was given, transform and use that expression, else
- * just take the old value and try to coerce it. We do this first so
- * that type incompatibility can be detected before we waste effort,
- * and because we need the expression to be parsed against the original
- * table rowtype.
+ * Set up an expression to transform the old data value to the new
+ * type. If a USING option was given, transform and use that
+ * expression, else just take the old value and try to coerce it. We
+ * do this first so that type incompatibility can be detected before
+ * we waste effort, and because we need the expression to be parsed
+ * against the original table rowtype.
*/
if (cmd->transform)
{
/* Expression must be able to access vars of old table */
rte = addRangeTableEntryForRelation(pstate,
RelationGetRelid(rel),
- makeAlias(RelationGetRelationName(rel), NIL),
+ makeAlias(RelationGetRelationName(rel), NIL),
false,
true);
addRTEtoQuery(pstate, rte, false, true);
if (expression_returns_set(transform))
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("transform expression must not return a set")));
+ errmsg("transform expression must not return a set")));
/* No subplans or aggregates, either... */
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in transform expression")));
+ errmsg("cannot use subquery in transform expression")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
ReleaseSysCache(tuple);
/*
- * The recursion case is handled by ATSimpleRecursion. However,
- * if we are told not to recurse, there had better not be any
- * child tables; else the alter would put them out of step.
+ * The recursion case is handled by ATSimpleRecursion. However, if we
+ * are told not to recurse, there had better not be any child tables;
+ * else the alter would put them out of step.
*/
if (recurse)
ATSimpleRecursion(wqueue, rel, cmd, recurse);
heapTup = SearchSysCacheCopyAttName(RelationGetRelid(rel), colName);
if (!HeapTupleIsValid(heapTup)) /* shouldn't happen */
ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- colName, RelationGetRelationName(rel))));
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ colName, RelationGetRelationName(rel))));
attTup = (Form_pg_attribute) GETSTRUCT(heapTup);
attnum = attTup->attnum;
/* Check for multiple ALTER TYPE on same column --- can't cope */
- if (attTup->atttypid != tab->oldDesc->attrs[attnum-1]->atttypid ||
- attTup->atttypmod != tab->oldDesc->attrs[attnum-1]->atttypmod)
+ if (attTup->atttypid != tab->oldDesc->attrs[attnum - 1]->atttypid ||
+ attTup->atttypmod != tab->oldDesc->attrs[attnum - 1]->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot alter type of column \"%s\" twice",
{
defaultexpr = build_column_default(rel, attnum);
Assert(defaultexpr);
- defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
- defaultexpr, exprType(defaultexpr),
+ defaultexpr = coerce_to_target_type(NULL, /* no UNKNOWN params */
+ defaultexpr, exprType(defaultexpr),
targettype, typename->typmod,
COERCION_ASSIGNMENT,
COERCE_IMPLICIT_CAST);
defaultexpr = NULL;
/*
- * Find everything that depends on the column (constraints, indexes, etc),
- * and record enough information to let us recreate the objects.
+ * Find everything that depends on the column (constraints, indexes,
+ * etc), and record enough information to let us recreate the objects.
*
* The actual recreation does not happen here, but only after we have
- * performed all the individual ALTER TYPE operations. We have to save
- * the info before executing ALTER TYPE, though, else the deparser will
- * get confused.
+ * performed all the individual ALTER TYPE operations. We have to
+ * save the info before executing ALTER TYPE, though, else the
+ * deparser will get confused.
*
* There could be multiple entries for the same object, so we must check
- * to ensure we process each one only once. Note: we assume that an index
- * that implements a constraint will not show a direct dependency on the
- * column.
+ * to ensure we process each one only once. Note: we assume that an
+ * index that implements a constraint will not show a direct
+ * dependency on the column.
*/
depRel = heap_openr(DependRelationName, RowExclusiveLock);
while (HeapTupleIsValid(depTup = systable_getnext(scan)))
{
- Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
- ObjectAddress foundObject;
+ Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
+ ObjectAddress foundObject;
/* We don't expect any PIN dependencies on columns */
if (foundDep->deptype == DEPENDENCY_PIN)
switch (getObjectClass(&foundObject))
{
case OCLASS_CLASS:
- {
- char relKind = get_rel_relkind(foundObject.objectId);
-
- if (relKind == RELKIND_INDEX)
{
- Assert(foundObject.objectSubId == 0);
- if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
+ char relKind = get_rel_relkind(foundObject.objectId);
+
+ if (relKind == RELKIND_INDEX)
{
- tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
- foundObject.objectId);
- tab->changedIndexDefs = lappend(tab->changedIndexDefs,
- pg_get_indexdef_string(foundObject.objectId));
+ Assert(foundObject.objectSubId == 0);
+ if (!list_member_oid(tab->changedIndexOids, foundObject.objectId))
+ {
+ tab->changedIndexOids = lappend_oid(tab->changedIndexOids,
+ foundObject.objectId);
+ tab->changedIndexDefs = lappend(tab->changedIndexDefs,
+ pg_get_indexdef_string(foundObject.objectId));
+ }
}
+ else if (relKind == RELKIND_SEQUENCE)
+ {
+ /*
+ * This must be a SERIAL column's sequence. We
+ * need not do anything to it.
+ */
+ Assert(foundObject.objectSubId == 0);
+ }
+ else
+ {
+ /* Not expecting any other direct dependencies... */
+ elog(ERROR, "unexpected object depending on column: %s",
+ getObjectDescription(&foundObject));
+ }
+ break;
}
- else if (relKind == RELKIND_SEQUENCE)
- {
- /*
- * This must be a SERIAL column's sequence. We need not
- * do anything to it.
- */
- Assert(foundObject.objectSubId == 0);
- }
- else
- {
- /* Not expecting any other direct dependencies... */
- elog(ERROR, "unexpected object depending on column: %s",
- getObjectDescription(&foundObject));
- }
- break;
- }
case OCLASS_CONSTRAINT:
Assert(foundObject.objectSubId == 0);
if (!list_member_oid(tab->changedConstraintOids, foundObject.objectId))
{
tab->changedConstraintOids = lappend_oid(tab->changedConstraintOids,
- foundObject.objectId);
+ foundObject.objectId);
tab->changedConstraintDefs = lappend(tab->changedConstraintDefs,
- pg_get_constraintdef_string(foundObject.objectId));
+ pg_get_constraintdef_string(foundObject.objectId));
}
break;
break;
case OCLASS_DEFAULT:
+
/*
- * Ignore the column's default expression, since we will fix
- * it below.
+ * Ignore the column's default expression, since we will
+ * fix it below.
*/
Assert(defaultexpr);
break;
case OCLASS_OPCLASS:
case OCLASS_TRIGGER:
case OCLASS_SCHEMA:
+
/*
* We don't expect any of these sorts of objects to depend
* on a column.
while (HeapTupleIsValid(depTup = systable_getnext(scan)))
{
- Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
+ Form_pg_depend foundDep = (Form_pg_depend) GETSTRUCT(depTup);
if (foundDep->deptype != DEPENDENCY_NORMAL)
elog(ERROR, "found unexpected dependency type '%c'",
heap_close(depRel, RowExclusiveLock);
/*
- * Here we go --- change the recorded column type. (Note heapTup is
- * a copy of the syscache entry, so okay to scribble on.)
+ * Here we go --- change the recorded column type. (Note heapTup is a
+ * copy of the syscache entry, so okay to scribble on.)
*/
attTup->atttypid = targettype;
attTup->atttypmod = typename->typmod;
/* Install dependency on new datatype */
add_column_datatype_dependency(RelationGetRelid(rel), attnum, targettype);
- /* Drop any pg_statistic entry for the column, since it's now wrong type */
+ /*
+ * Drop any pg_statistic entry for the column, since it's now wrong
+ * type
+ */
RemoveStatistics(RelationGetRelid(rel), attnum);
/*
- * Update the default, if present, by brute force --- remove and re-add
- * the default. Probably unsafe to take shortcuts, since the new version
- * may well have additional dependencies. (It's okay to do this now,
- * rather than after other ALTER TYPE commands, since the default won't
- * depend on other column types.)
+ * Update the default, if present, by brute force --- remove and
+ * re-add the default. Probably unsafe to take shortcuts, since the
+ * new version may well have additional dependencies. (It's okay to
+ * do this now, rather than after other ALTER TYPE commands, since the
+ * default won't depend on other column types.)
*/
if (defaultexpr)
{
CommandCounterIncrement();
/*
- * We use RESTRICT here for safety, but at present we do not expect
- * anything to depend on the default.
+ * We use RESTRICT here for safety, but at present we do not
+ * expect anything to depend on the default.
*/
RemoveAttrDefault(RelationGetRelid(rel), attnum, DROP_RESTRICT, true);
ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab)
{
ObjectAddress obj;
- ListCell *l;
+ ListCell *l;
/*
* Re-parse the index and constraint definitions, and attach them to
- * the appropriate work queue entries. We do this before dropping
+ * the appropriate work queue entries. We do this before dropping
* because in the case of a FOREIGN KEY constraint, we might not yet
- * have exclusive lock on the table the constraint is attached to,
- * and we need to get that before dropping. It's safe because the
- * parser won't actually look at the catalogs to detect the existing
- * entry.
+ * have exclusive lock on the table the constraint is attached to, and
+ * we need to get that before dropping. It's safe because the parser
+ * won't actually look at the catalogs to detect the existing entry.
*/
foreach(l, tab->changedIndexDefs)
- {
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
- }
foreach(l, tab->changedConstraintDefs)
- {
ATPostAlterTypeParse((char *) lfirst(l), wqueue);
- }
/*
- * Now we can drop the existing constraints and indexes --- constraints
- * first, since some of them might depend on the indexes. It should be
- * okay to use DROP_RESTRICT here, since nothing else should be depending
- * on these objects.
+ * Now we can drop the existing constraints and indexes ---
+ * constraints first, since some of them might depend on the indexes.
+ * It should be okay to use DROP_RESTRICT here, since nothing else
+ * should be depending on these objects.
*/
if (tab->changedConstraintOids)
obj.classId = get_system_catalog_relid(ConstraintRelationName);
ListCell *list_item;
/*
- * We expect that we only have to do raw parsing and parse analysis, not
- * any rule rewriting, since these will all be utility statements.
+ * We expect that we only have to do raw parsing and parse analysis,
+ * not any rule rewriting, since these will all be utility statements.
*/
raw_parsetree_list = raw_parser(cmd);
querytree_list = NIL;
Node *parsetree = (Node *) lfirst(list_item);
querytree_list = list_concat(querytree_list,
- parse_analyze(parsetree, NULL, 0));
+ parse_analyze(parsetree, NULL, 0));
}
/*
- * Attach each generated command to the proper place in the work queue.
- * Note this could result in creation of entirely new work-queue entries.
+ * Attach each generated command to the proper place in the work
+ * queue. Note this could result in creation of entirely new
+ * work-queue entries.
*/
foreach(list_item, querytree_list)
{
switch (nodeTag(query->utilityStmt))
{
case T_IndexStmt:
- {
- IndexStmt *stmt = (IndexStmt *) query->utilityStmt;
- AlterTableCmd *newcmd;
-
- rel = relation_openrv(stmt->relation, AccessExclusiveLock);
- tab = ATGetQueueEntry(wqueue, rel);
- newcmd = makeNode(AlterTableCmd);
- newcmd->subtype = AT_ReAddIndex;
- newcmd->def = (Node *) stmt;
- tab->subcmds[AT_PASS_OLD_INDEX] =
- lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd);
- relation_close(rel, NoLock);
- break;
- }
+ {
+ IndexStmt *stmt = (IndexStmt *) query->utilityStmt;
+ AlterTableCmd *newcmd;
+
+ rel = relation_openrv(stmt->relation, AccessExclusiveLock);
+ tab = ATGetQueueEntry(wqueue, rel);
+ newcmd = makeNode(AlterTableCmd);
+ newcmd->subtype = AT_ReAddIndex;
+ newcmd->def = (Node *) stmt;
+ tab->subcmds[AT_PASS_OLD_INDEX] =
+ lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd);
+ relation_close(rel, NoLock);
+ break;
+ }
case T_AlterTableStmt:
- {
- AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt;
- ListCell *lcmd;
-
- rel = relation_openrv(stmt->relation, AccessExclusiveLock);
- tab = ATGetQueueEntry(wqueue, rel);
- foreach(lcmd, stmt->cmds)
{
- AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
+ AlterTableStmt *stmt = (AlterTableStmt *) query->utilityStmt;
+ ListCell *lcmd;
- switch (cmd->subtype)
+ rel = relation_openrv(stmt->relation, AccessExclusiveLock);
+ tab = ATGetQueueEntry(wqueue, rel);
+ foreach(lcmd, stmt->cmds)
{
- case AT_AddIndex:
- cmd->subtype = AT_ReAddIndex;
- tab->subcmds[AT_PASS_OLD_INDEX] =
- lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd);
- break;
- case AT_AddConstraint:
- tab->subcmds[AT_PASS_OLD_CONSTR] =
- lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd);
- break;
- default:
- elog(ERROR, "unexpected statement type: %d",
- (int) cmd->subtype);
+ AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
+
+ switch (cmd->subtype)
+ {
+ case AT_AddIndex:
+ cmd->subtype = AT_ReAddIndex;
+ tab->subcmds[AT_PASS_OLD_INDEX] =
+ lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd);
+ break;
+ case AT_AddConstraint:
+ tab->subcmds[AT_PASS_OLD_CONSTR] =
+ lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd);
+ break;
+ default:
+ elog(ERROR, "unexpected statement type: %d",
+ (int) cmd->subtype);
+ }
}
+ relation_close(rel, NoLock);
+ break;
}
- relation_close(rel, NoLock);
- break;
- }
default:
elog(ERROR, "unexpected statement type: %d",
(int) nodeTag(query->utilityStmt));
class_rel = heap_openr(RelationRelationName, RowExclusiveLock);
tuple = SearchSysCache(RELOID,
- ObjectIdGetDatum(relationOid),
- 0, 0, 0);
+ ObjectIdGetDatum(relationOid),
+ 0, 0, 0);
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for relation %u", relationOid);
tuple_class = (Form_pg_class) GETSTRUCT(tuple);
NameStr(tuple_class->relname))));
}
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
Datum repl_val[Natts_pg_class];
char repl_null[Natts_pg_class];
char repl_repl[Natts_pg_class];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
/* Otherwise, check that we are the superuser */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change owner")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("must be superuser to change owner")));
memset(repl_null, ' ', sizeof(repl_null));
memset(repl_repl, ' ', sizeof(repl_repl));
heap_freetuple(newtuple);
/*
- * If we are operating on a table, also change the ownership of any
- * indexes that belong to the table, as well as the table's toast
- * table (if it has one)
+ * If we are operating on a table, also change the ownership of
+ * any indexes that belong to the table, as well as the table's
+ * toast table (if it has one)
*/
if (tuple_class->relkind == RELKIND_RELATION ||
tuple_class->relkind == RELKIND_TOASTVALUE)
ATPrepSetTableSpace(AlteredTableInfo *tab, Relation rel, char *tablespacename)
{
Oid tablespaceId;
- AclResult aclresult;
+ AclResult aclresult;
/*
* We do our own permission checking because we want to allow this on
if (!OidIsValid(tablespaceId))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("tablespace \"%s\" does not exist", tablespacename)));
+ errmsg("tablespace \"%s\" does not exist", tablespacename)));
/* Check its permissions */
aclresult = pg_tablespace_aclcheck(tablespaceId, GetUserId(), ACL_CREATE);
if (OidIsValid(tab->newTableSpace))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("multiple SET TABLESPACE subcommands are not valid")));
+ errmsg("multiple SET TABLESPACE subcommands are not valid")));
tab->newTableSpace = tablespaceId;
}
RelationGetRelationName(rel))));
/*
- * Don't allow moving temp tables of other backends ... their
- * local buffer manager is not going to cope.
+ * Don't allow moving temp tables of other backends ... their local
+ * buffer manager is not going to cope.
*/
if (isOtherTempNamespace(RelationGetNamespace(rel)))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot move temporary tables of other sessions")));
+ errmsg("cannot move temporary tables of other sessions")));
/*
* No work if no change in tablespace.
bool use_wal;
BlockNumber nblocks;
BlockNumber blkno;
- char buf[BLCKSZ];
+ char buf[BLCKSZ];
Page page = (Page) buf;
/*
- * Since we copy the data directly without looking at the shared buffers,
- * we'd better first flush out any pages of the source relation that are
- * in shared buffers. We assume no new pages will get loaded into
- * buffers while we are holding exclusive lock on the rel.
+ * Since we copy the data directly without looking at the shared
+ * buffers, we'd better first flush out any pages of the source
+ * relation that are in shared buffers. We assume no new pages will
+ * get loaded into buffers while we are holding exclusive lock on the
+ * rel.
*/
FlushRelationBuffers(rel, 0);
}
/*
- * Now write the page. We say isTemp = true even if it's not a
+ * Now write the page. We say isTemp = true even if it's not a
* temp rel, because there's no need for smgr to schedule an fsync
* for this write; we'll do it ourselves below.
*/
/*
* If the rel isn't temp, we must fsync it down to disk before it's
- * safe to commit the transaction. (For a temp rel we don't care
+ * safe to commit the transaction. (For a temp rel we don't care
* since the rel will be uninteresting after a crash anyway.)
*
- * It's obvious that we must do this when not WAL-logging the copy.
- * It's less obvious that we have to do it even if we did WAL-log the
+ * It's obvious that we must do this when not WAL-logging the copy. It's
+ * less obvious that we have to do it even if we did WAL-log the
* copied pages. The reason is that since we're copying outside
* shared buffers, a CHECKPOINT occurring during the copy has no way
* to flush the previously written data to disk (indeed it won't know
- * the new rel even exists). A crash later on would replay WAL from the
- * checkpoint, therefore it wouldn't replay our earlier WAL entries.
- * If we do not fsync those pages here, they might still not be on disk
- * when the crash occurs.
+ * the new rel even exists). A crash later on would replay WAL from
+ * the checkpoint, therefore it wouldn't replay our earlier WAL
+ * entries. If we do not fsync those pages here, they might still not
+ * be on disk when the crash occurs.
*/
if (!rel->rd_istemp)
smgrimmedsync(dst);
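The hunk above only rewraps the comment, but the point it makes is worth illustrating: data copied outside the shared buffer pool must be fsync'ed explicitly before the operation can be considered durable, because no checkpoint-like mechanism knows the new file exists. A minimal standalone sketch of that copy-then-fsync pattern using plain POSIX I/O; BLOCKSZ, the function name, and the simplified error handling are illustrative assumptions, not the backend's smgr API.

/*
 * Sketch only: copy a file block by block, then force it to disk before
 * reporting success.  Plain POSIX I/O, not PostgreSQL internals.
 */
#include <fcntl.h>
#include <unistd.h>

#define BLOCKSZ 8192

int
copy_and_sync(const char *srcpath, const char *dstpath)
{
	char		buf[BLOCKSZ];
	ssize_t		nread;
	int			src = open(srcpath, O_RDONLY);
	int			dst = open(dstpath, O_WRONLY | O_CREAT | O_EXCL, 0600);

	if (src < 0 || dst < 0)
		return -1;

	/* copy block by block; nothing here goes through a shared cache */
	while ((nread = read(src, buf, sizeof(buf))) > 0)
	{
		if (write(dst, buf, (size_t) nread) != nread)
			return -1;
	}
	if (nread < 0)
		return -1;

	/*
	 * Force the copy down to disk before declaring victory: a later crash
	 * could otherwise lose pages that were only in the kernel cache.
	 */
	if (fsync(dst) != 0)
		return -1;

	close(src);
	close(dst);
	return 0;
}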
*
* Note: this is also invoked from outside this module; in such cases we
* expect the caller to have verified that the relation is a table and we
- * have all the right permissions. Callers expect this function
+ * have all the right permissions. Callers expect this function
* to end with CommandCounterIncrement if it makes any changes.
*/
void
/*
* Grab an exclusive lock on the target table, which we will NOT
- * release until end of transaction. (This is probably redundant
- * in all present uses...)
+ * release until end of transaction. (This is probably redundant in
+ * all present uses...)
*/
rel = heap_open(relOid, AccessExclusiveLock);
* We cannot allow toasting a shared relation after initdb (because
* there's no way to mark it toasted in other databases' pg_class).
* Unfortunately we can't distinguish initdb from a manually started
- * standalone backend (toasting happens after the bootstrap phase,
- * so checking IsBootstrapProcessingMode() won't work). However, we can
+ * standalone backend (toasting happens after the bootstrap phase, so
+ * checking IsBootstrapProcessingMode() won't work). However, we can
* at least prevent this mistake under normal multi-user operation.
*/
shared_relation = rel->rd_rel->relisshared;
if (shared_relation && IsUnderPostmaster)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("shared tables cannot be toasted after initdb")));
+ errmsg("shared tables cannot be toasted after initdb")));
/*
* Is it already toasted?
void
AtEOXact_on_commit_actions(bool isCommit, TransactionId xid)
{
- ListCell *cur_item;
- ListCell *prev_item;
+ ListCell *cur_item;
+ ListCell *prev_item;
prev_item = NULL;
cur_item = list_head(on_commits);
* Post-subcommit or post-subabort cleanup for ON COMMIT management.
*
* During subabort, we can immediately remove entries created during this
- * subtransaction. During subcommit, just relabel entries marked during
+ * subtransaction. During subcommit, just relabel entries marked during
* this subtransaction as being the parent's responsibility.
*/
void
AtEOSubXact_on_commit_actions(bool isCommit, TransactionId childXid,
TransactionId parentXid)
{
- ListCell *cur_item;
- ListCell *prev_item;
+ ListCell *cur_item;
+ ListCell *prev_item;
prev_item = NULL;
cur_item = list_head(on_commits);
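Both hunks above only re-indent the prev_item/cur_item declarations, but the walk they set up is the classic trailing-pointer scan over a singly linked list, unlinking entries that belong to the transaction being cleaned up. A minimal standalone sketch of that scan, assuming a hand-rolled list node rather than the backend's List/ListCell API:

/*
 * Sketch only: remove list entries created by an aborted (sub)transaction,
 * keeping a trailing pointer so the scan can continue after an unlink.
 */
#include <stdlib.h>

typedef struct OnCommitItem
{
	unsigned int creating_xid;	/* xact that created this entry */
	struct OnCommitItem *next;
} OnCommitItem;

void
drop_entries_of_xid(OnCommitItem **head, unsigned int aborted_xid)
{
	OnCommitItem *prev = NULL;
	OnCommitItem *cur = *head;

	while (cur != NULL)
	{
		if (cur->creating_xid == aborted_xid)
		{
			OnCommitItem *doomed = cur;

			/* unlink; prev stays put so the scan keeps its place */
			cur = cur->next;
			if (prev)
				prev->next = cur;
			else
				*head = cur;
			free(doomed);
		}
		else
		{
			prev = cur;
			cur = cur->next;
		}
	}
}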
* To allow CREATE DATABASE to give a new database a default tablespace
* that's different from the template database's default, we make the
* provision that a zero in pg_class.reltablespace means the database's
- * default tablespace. Without this, CREATE DATABASE would have to go in
+ * default tablespace. Without this, CREATE DATABASE would have to go in
* and munge the system catalogs of the new database. This special meaning
* of zero also applies in pg_namespace.nsptablespace.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.8 2004/08/08 01:31:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/tablespace.c,v 1.9 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
#ifdef HAVE_SYMLINK
struct stat st;
- char *dir;
+ char *dir;
/*
- * The global tablespace doesn't have per-database subdirectories,
- * so nothing to do for it.
+ * The global tablespace doesn't have per-database subdirectories, so
+ * nothing to do for it.
*/
if (spcNode == GLOBALTABLESPACE_OID)
return;
* DROP TABLESPACE or TablespaceCreateDbspace is running
* concurrently. Simple reads from pg_tablespace are OK.
*/
- Relation rel;
+ Relation rel;
if (!isRedo)
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);
rel = NULL;
/*
- * Recheck to see if someone created the directory while
- * we were waiting for lock.
+ * Recheck to see if someone created the directory while we
+ * were waiting for lock.
*/
if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
{
if (mkdir(dir, S_IRWXU) < 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not create directory \"%s\": %m",
- dir)));
+ errmsg("could not create directory \"%s\": %m",
+ dir)));
}
/* OK to drop the exclusive lock */
}
pfree(dir);
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
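The recheck logic rewrapped above follows a check / lock / re-check / create pattern: test cheaply without the lock, take the lock, test again in case someone else got there first, and only then create the directory. A standalone sketch of the same pattern, with a pthread mutex standing in for the ExclusiveLock on pg_tablespace (an illustrative assumption, not the backend's locking API):

/*
 * Sketch only: ensure a directory exists, rechecking after the lock is
 * acquired so concurrent creators do not collide.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <sys/types.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

bool
ensure_directory(const char *dir)
{
	struct stat st;

	/* Fast path: already there, no lock needed. */
	if (stat(dir, &st) == 0 && S_ISDIR(st.st_mode))
		return true;

	pthread_mutex_lock(&dir_lock);

	/* Re-check: someone may have created it while we waited for the lock. */
	if (!(stat(dir, &st) == 0 && S_ISDIR(st.st_mode)))
	{
		if (mkdir(dir, S_IRWXU) < 0 && errno != EEXIST)
		{
			pthread_mutex_unlock(&dir_lock);
			return false;
		}
	}

	pthread_mutex_unlock(&dir_lock);
	return true;
}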
/*
CreateTableSpace(CreateTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
- Relation rel;
- Datum values[Natts_pg_tablespace];
+ Relation rel;
+ Datum values[Natts_pg_tablespace];
char nulls[Natts_pg_tablespace];
HeapTuple tuple;
Oid tablespaceoid;
- char *location;
- char *linkloc;
+ char *location;
+ char *linkloc;
AclId ownerid;
/* validate */
/* Must be super user */
if (!superuser())
ereport(ERROR,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to create tablespace \"%s\"",
- stmt->tablespacename),
- errhint("Must be superuser to create a tablespace.")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to create tablespace \"%s\"",
+ stmt->tablespacename),
+ errhint("Must be superuser to create a tablespace.")));
/* However, the eventual owner of the tablespace need not be */
if (stmt->owner)
if (strchr(location, '\''))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("tablespace location may not contain single quotes")));
+ errmsg("tablespace location may not contain single quotes")));
/*
* Allowing relative paths seems risky
errmsg("tablespace location must be an absolute path")));
/*
- * Check that location isn't too long. Remember that we're going to append
- * '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole path
- * explicitly? This may be overly conservative.)
+ * Check that location isn't too long. Remember that we're going to
+ * append '/<dboid>/<relid>.<nnn>' (XXX but do we ever form the whole
+ * path explicitly? This may be overly conservative.)
*/
if (strlen(location) >= (MAXPGPATH - 1 - 10 - 1 - 10 - 1 - 10))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"",
stmt->tablespacename),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/*
- * Check that there is no other tablespace by this name. (The
- * unique index would catch this anyway, but might as well give
- * a friendlier message.)
+ * Check that there is no other tablespace by this name. (The unique
+ * index would catch this anyway, but might as well give a friendlier
+ * message.)
*/
if (OidIsValid(get_tablespace_oid(stmt->tablespacename)))
ereport(ERROR,
heap_freetuple(tuple);
/*
- * Attempt to coerce target directory to safe permissions. If this
+ * Attempt to coerce target directory to safe permissions. If this
* fails, it doesn't exist or has the wrong owner.
*/
if (chmod(location, 0700) != 0)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not set permissions on directory \"%s\": %m",
- location)));
+ errmsg("could not set permissions on directory \"%s\": %m",
+ location)));
/*
* Check the target directory is empty.
location)));
/*
- * Create the PG_VERSION file in the target directory. This has several
- * purposes: to make sure we can write in the directory, to prevent
- * someone from creating another tablespace pointing at the same
- * directory (the emptiness check above will fail), and to label
+ * Create the PG_VERSION file in the target directory. This has
+ * several purposes: to make sure we can write in the directory, to
+ * prevent someone from creating another tablespace pointing at the
+ * same directory (the emptiness check above will fail), and to label
* tablespace directories by PG version.
*/
set_short_version(location);
heap_close(rel, RowExclusiveLock);
-#else /* !HAVE_SYMLINK */
+#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
/*
DropTableSpace(DropTableSpaceStmt *stmt)
{
#ifdef HAVE_SYMLINK
- char *tablespacename = stmt->tablespacename;
- HeapScanDesc scandesc;
- Relation rel;
- HeapTuple tuple;
- ScanKeyData entry[1];
- char *location;
- Oid tablespaceoid;
- DIR *dirdesc;
+ char *tablespacename = stmt->tablespacename;
+ HeapScanDesc scandesc;
+ Relation rel;
+ HeapTuple tuple;
+ ScanKeyData entry[1];
+ char *location;
+ Oid tablespaceoid;
+ DIR *dirdesc;
struct dirent *de;
- char *subfile;
+ char *subfile;
/* don't call this in a transaction block */
PreventTransactionChain((void *) stmt, "DROP TABLESPACE");
/*
* Acquire ExclusiveLock on pg_tablespace to ensure that no one else
- * is trying to do DROP TABLESPACE or TablespaceCreateDbspace concurrently.
+ * is trying to do DROP TABLESPACE or TablespaceCreateDbspace
+ * concurrently.
*/
rel = heap_openr(TableSpaceRelationName, ExclusiveLock);
/*
* Check if the tablespace still contains any files. We try to rmdir
* each per-database directory we find in it. rmdir failure implies
- * there are still files in that subdirectory, so give up. (We do not
- * have to worry about undoing any already completed rmdirs, since
- * the next attempt to use the tablespace from that database will simply
+ * there are still files in that subdirectory, so give up. (We do not
+ * have to worry about undoing any already completed rmdirs, since the
+ * next attempt to use the tablespace from that database will simply
* recreate the subdirectory via TablespaceCreateDbspace.)
*
- * Since we hold exclusive lock, no one else should be creating any
- * fresh subdirectories in parallel. It is possible that new files
- * are being created within subdirectories, though, so the rmdir
- * call could fail. Worst consequence is a less friendly error message.
+ * Since we hold exclusive lock, no one else should be creating any fresh
+ * subdirectories in parallel. It is possible that new files are
+ * being created within subdirectories, though, so the rmdir call
+ * could fail. Worst consequence is a less friendly error message.
*/
dirdesc = AllocateDir(location);
if (dirdesc == NULL)
pfree(subfile);
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not remove junction dir \"%s\": %m",
- location)));
+ location)));
#endif
pfree(subfile);
pfree(location);
/*
- * We have successfully destroyed the infrastructure ... there is
- * now no way to roll back the DROP ... so proceed to remove the
+ * We have successfully destroyed the infrastructure ... there is now
+ * no way to roll back the DROP ... so proceed to remove the
* pg_tablespace tuple.
*/
simple_heap_delete(rel, &tuple->t_self);
heap_close(rel, ExclusiveLock);
-#else /* !HAVE_SYMLINK */
+#else /* !HAVE_SYMLINK */
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("tablespaces are not supported on this platform")));
-#endif /* HAVE_SYMLINK */
+#endif /* HAVE_SYMLINK */
}
static bool
directory_is_empty(const char *path)
{
- DIR *dirdesc;
+ DIR *dirdesc;
struct dirent *de;
dirdesc = AllocateDir(path);
return false;
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
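The directory_is_empty() hunks above are again whitespace-only, but the check itself is simple enough to show in isolation. A standalone sketch using plain POSIX opendir()/readdir() instead of the backend's AllocateDir() wrapper, and without the Windows errno workaround; treating an unreadable directory as non-empty is a choice made for this sketch only.

/*
 * Sketch only: a directory is "empty" if it contains nothing besides the
 * "." and ".." entries.
 */
#include <dirent.h>
#include <stdbool.h>
#include <string.h>

bool
dir_is_empty(const char *path)
{
	DIR		   *dirdesc = opendir(path);
	struct dirent *de;
	bool		empty = true;

	if (dirdesc == NULL)
		return false;			/* treat unreadable as non-empty */

	while ((de = readdir(dirdesc)) != NULL)
	{
		/* ignore the self and parent entries */
		if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
			continue;
		empty = false;
		break;
	}

	closedir(dirdesc);
	return empty;
}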
Oid
get_tablespace_oid(const char *tablespacename)
{
- Oid result;
- Relation rel;
+ Oid result;
+ Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
else
result = InvalidOid;
- heap_endscan(scandesc);
- heap_close(rel, AccessShareLock);
+ heap_endscan(scandesc);
+ heap_close(rel, AccessShareLock);
return result;
}
char *
get_tablespace_name(Oid spc_oid)
{
- char *result;
- Relation rel;
+ char *result;
+ Relation rel;
HeapScanDesc scandesc;
HeapTuple tuple;
- ScanKeyData entry[1];
+ ScanKeyData entry[1];
/* Search pg_tablespace */
rel = heap_openr(TableSpaceRelationName, AccessShareLock);
else
result = NULL;
- heap_endscan(scandesc);
- heap_close(rel, AccessShareLock);
+ heap_endscan(scandesc);
+ heap_close(rel, AccessShareLock);
return result;
}
void
RenameTableSpace(const char *oldname, const char *newname)
{
- Relation rel;
- ScanKeyData entry[1];
+ Relation rel;
+ ScanKeyData entry[1];
HeapScanDesc scan;
HeapTuple tup;
HeapTuple newtuple;
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable tablespace name \"%s\"", newname),
- errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
+ errdetail("The prefix \"pg_\" is reserved for system tablespaces.")));
/* Make sure the new name doesn't exist */
ScanKeyInit(&entry[0],
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("tablespace \"%s\" already exists",
newname)));
-
+
heap_endscan(scan);
/* OK, update the entry */
void
AlterTableSpaceOwner(const char *name, AclId newOwnerSysId)
{
- Relation rel;
- ScanKeyData entry[1];
+ Relation rel;
+ ScanKeyData entry[1];
HeapScanDesc scandesc;
Form_pg_tablespace spcForm;
HeapTuple tup;
spcForm = (Form_pg_tablespace) GETSTRUCT(tup);
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
Datum repl_val[Natts_pg_tablespace];
char repl_null[Natts_pg_tablespace];
char repl_repl[Natts_pg_tablespace];
- Acl *newAcl;
+ Acl *newAcl;
Datum aclDatum;
bool isNull;
HeapTuple newtuple;
* necessary when the ACL is non-null.
*/
aclDatum = heap_getattr(tup,
- Anum_pg_tablespace_spcacl,
- RelationGetDescr(rel),
- &isNull);
+ Anum_pg_tablespace_spcacl,
+ RelationGetDescr(rel),
+ &isNull);
if (!isNull)
{
newAcl = aclnewowner(DatumGetAclP(aclDatum),
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.167 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/trigger.c,v 1.168 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!HeapTupleIsValid(tup))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- trigname, get_rel_name(relid))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ trigname, get_rel_name(relid))));
if (!pg_class_ownercheck(relid, GetUserId()))
aclcheck_error(ACLCHECK_NOT_OWNER, ACL_KIND_CLASS,
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("trigger \"%s\" for table \"%s\" does not exist",
- oldname, RelationGetRelationName(targetrel))));
+ errmsg("trigger \"%s\" for table \"%s\" does not exist",
+ oldname, RelationGetRelationName(targetrel))));
}
systable_endscan(tgscan);
* Deferred trigger stuff
*
* The DeferredTriggersData struct holds data about pending deferred
- * trigger events during the current transaction tree. The struct and
+ * trigger events during the current transaction tree. The struct and
* most of its subsidiary data are kept in TopTransactionContext; however
* the individual event records are kept in CurTransactionContext, so that
* they will easily go away during subtransaction abort.
* saves a copy, which we use to restore the state if we abort.
*
* numpushed and numalloc keep control of allocation and storage in the above
- * stacks. numpushed is essentially the current subtransaction nesting depth.
+ * stacks. numpushed is essentially the current subtransaction nesting depth.
*
* XXX We need to be able to save the per-event data in a file if it grows too
* large.
*/
typedef struct DeferredTriggerStateData
{
- bool all_isset;
- bool all_isdeferred;
- int numstates; /* number of trigstates[] entries in use */
- int numalloc; /* allocated size of trigstates[] */
- DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
+ bool all_isset;
+ bool all_isdeferred;
+ int numstates; /* number of trigstates[] entries in use */
+ int numalloc; /* allocated size of trigstates[] */
+ DeferredTriggerStatusData trigstates[1]; /* VARIABLE LENGTH ARRAY */
} DeferredTriggerStateData;
typedef DeferredTriggerStateData *DeferredTriggerState;
/* Per-transaction data */
typedef struct DeferredTriggersData
{
- DeferredTriggerState state;
- DeferredTriggerEvent events;
- DeferredTriggerEvent tail_thisxact;
- DeferredTriggerEvent events_imm;
- DeferredTriggerEvent *tail_stack;
- DeferredTriggerEvent *imm_stack;
- DeferredTriggerState *state_stack;
- int numpushed;
- int numalloc;
+ DeferredTriggerState state;
+ DeferredTriggerEvent events;
+ DeferredTriggerEvent tail_thisxact;
+ DeferredTriggerEvent events_imm;
+ DeferredTriggerEvent *tail_stack;
+ DeferredTriggerEvent *imm_stack;
+ DeferredTriggerState *state_stack;
+ int numpushed;
+ int numalloc;
} DeferredTriggersData;
typedef DeferredTriggersData *DeferredTriggers;
static DeferredTriggerState DeferredTriggerStateCreate(int numalloc);
static DeferredTriggerState DeferredTriggerStateCopy(DeferredTriggerState state);
static DeferredTriggerState DeferredTriggerStateAddItem(DeferredTriggerState state,
- Oid tgoid, bool tgisdeferred);
+ Oid tgoid, bool tgisdeferred);
/* ----------
static bool
deferredTriggerCheckState(Oid tgoid, int32 itemstate)
{
- bool tgisdeferred;
- int i;
+ bool tgisdeferred;
+ int i;
/*
* For not-deferrable triggers (i.e. normal AFTER ROW triggers and
/*
* No ALL state known either, remember the default state as the
- * current and return that. (XXX why do we bother making a state entry?)
+ * current and return that. (XXX why do we bother making a state
+ * entry?)
*/
tgisdeferred = ((itemstate & TRIGGER_DEFERRED_INITDEFERRED) != 0);
deferredTriggers->state =
/*
* If immediate_only is true, then the only events that could need
- * firing are those since events_imm. (But if
- * events_imm is NULL, we must scan the entire list.)
+ * firing are those since events_imm. (But if events_imm is NULL, we
+ * must scan the entire list.)
*/
if (immediate_only && deferredTriggers->events_imm != NULL)
{
int i;
/*
- * Skip executing cancelled events, and events done by transactions
- * that are not aborted.
+ * Skip executing cancelled events, and events done by
+ * transactions that are not aborted.
*/
if (!(event->dte_event & TRIGGER_DEFERRED_CANCELED) ||
- (event->dte_event & TRIGGER_DEFERRED_DONE &&
- TransactionIdIsValid(event->dte_done_xid) &&
- !TransactionIdDidAbort(event->dte_done_xid)))
+ (event->dte_event & TRIGGER_DEFERRED_DONE &&
+ TransactionIdIsValid(event->dte_done_xid) &&
+ !TransactionIdDidAbort(event->dte_done_xid)))
{
MemoryContextReset(per_tuple_context);
for (i = 0; i < event->dte_n_items; i++)
{
if (event->dte_item[i].dti_state & TRIGGER_DEFERRED_DONE &&
- TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
- !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
+ TransactionIdIsValid(event->dte_item[i].dti_done_xid) &&
+ !(TransactionIdDidAbort(event->dte_item[i].dti_done_xid)))
continue;
/*
{
/*
* We can drop an item if it's done, but only if we're not
- * inside a subtransaction because it could abort later on.
- * We will want to check the item again if it does.
+ * inside a subtransaction because it could abort later on. We
+ * will want to check the item again if it does.
*/
if (immediate_only && !IsSubTransaction())
{
/*
* Forget everything we know about deferred triggers.
*
- * Since all the info is in TopTransactionContext or children thereof,
- * we need do nothing special to reclaim memory.
+ * Since all the info is in TopTransactionContext or children thereof, we
+ * need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
/*
* Forget everything we know about deferred triggers.
*
- * Since all the info is in TopTransactionContext or children thereof,
- * we need do nothing special to reclaim memory.
+ * Since all the info is in TopTransactionContext or children thereof, we
+ * need do nothing special to reclaim memory.
*/
deferredTriggers = NULL;
}
deferredTriggers->tail_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->tail_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->imm_stack = (DeferredTriggerEvent *)
repalloc(deferredTriggers->imm_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerEvent));
deferredTriggers->state_stack = (DeferredTriggerState *)
repalloc(deferredTriggers->state_stack,
- deferredTriggers->numalloc * sizeof(DeferredTriggerState));
+ deferredTriggers->numalloc * sizeof(DeferredTriggerState));
}
}
deferredTriggers->tail_thisxact->dte_next = NULL;
/*
- * We don't need to free the items, since the CurTransactionContext
- * will be reset shortly.
+ * We don't need to free the items, since the
+ * CurTransactionContext will be reset shortly.
*/
/*
state = (DeferredTriggerState)
MemoryContextAllocZero(TopTransactionContext,
sizeof(DeferredTriggerStateData) +
- (numalloc - 1) * sizeof(DeferredTriggerStatusData));
+ (numalloc - 1) *sizeof(DeferredTriggerStatusData));
state->numalloc = numalloc;
{
if (state->numstates >= state->numalloc)
{
- int newalloc = state->numalloc * 2;
+ int newalloc = state->numalloc * 2;
- newalloc = Max(newalloc, 8); /* in case original has size 0 */
+ newalloc = Max(newalloc, 8); /* in case original has size 0 */
state = (DeferredTriggerState)
repalloc(state,
sizeof(DeferredTriggerStateData) +
- (newalloc - 1) * sizeof(DeferredTriggerStatusData));
+ (newalloc - 1) *sizeof(DeferredTriggerStatusData));
state->numalloc = newalloc;
Assert(state->numstates < state->numalloc);
}
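The growth logic reindented above uses the common "struct ending in a one-element array" idiom: allocate sizeof(struct) + (n - 1) * sizeof(element), and double the capacity (with a floor of 8) when it fills up. A standalone sketch of the same idiom, with malloc/realloc standing in for palloc/repalloc and an invented element type; error handling is omitted for brevity.

/*
 * Sketch only: append an item to a variable-length trigger-state array,
 * growing the allocation by doubling when it is full.
 */
#include <stdlib.h>

typedef struct TrigStatus
{
	unsigned int tgoid;
	int			deferred;
} TrigStatus;

typedef struct TrigState
{
	int			numstates;		/* entries in use */
	int			numalloc;		/* allocated size of trigstates[] */
	TrigStatus	trigstates[1];	/* VARIABLE LENGTH ARRAY */
} TrigState;

TrigState *
state_add_item(TrigState *state, unsigned int tgoid, int deferred)
{
	if (state->numstates >= state->numalloc)
	{
		int			newalloc = state->numalloc * 2;

		if (newalloc < 8)
			newalloc = 8;		/* in case original has size 0 */
		/* realloc failure check omitted in this sketch */
		state = realloc(state,
						sizeof(TrigState) +
						(newalloc - 1) * sizeof(TrigStatus));
		state->numalloc = newalloc;
	}
	state->trigstates[state->numstates].tgoid = tgoid;
	state->trigstates[state->numstates].deferred = deferred;
	state->numstates++;
	return state;
}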
return;
/*
- * If in a subtransaction, and we didn't save the current state already,
- * save it so it can be restored if the subtransaction aborts.
+ * If in a subtransaction, and we didn't save the current state
+ * already, save it so it can be restored if the subtransaction
+ * aborts.
*/
if (deferredTriggers->numpushed > 0 &&
deferredTriggers->state_stack[deferredTriggers->numpushed - 1] == NULL)
return;
/*
- * Create a new event. We use the CurTransactionContext so the event
+ * Create a new event. We use the CurTransactionContext so the event
* will automatically go away if the subtransaction aborts.
*/
oldcxt = MemoryContextSwitchTo(CurTransactionContext);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.62 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/typecmds.c,v 1.63 2004/08/29 05:06:41 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type \"cstring\"",
- NameListToString(outputName))));
+ errmsg("type output function %s must return type \"cstring\"",
+ NameListToString(outputName))));
}
if (receiveOid)
{
if (resulttype != typoid)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type receive function %s must return type %s",
- NameListToString(receiveName), typeName)));
+ errmsg("type receive function %s must return type %s",
+ NameListToString(receiveName), typeName)));
}
if (sendOid)
{
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type \"bytea\"",
- NameListToString(sendName))));
+ errmsg("type send function %s must return type \"bytea\"",
+ NameListToString(sendName))));
}
/*
- * Convert analysis function proc name to an OID. If no analysis function
- * is specified, we'll use zero to select the built-in default algorithm.
+ * Convert analysis function proc name to an OID. If no analysis
+ * function is specified, we'll use zero to select the built-in
+ * default algorithm.
*/
if (analyzeName)
analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
* arguments (data value, element OID).
*
* For backwards compatibility we allow OPAQUE in place of the actual
- * type name; if we see this, we issue a warning and fix up the pg_proc
- * entry.
+ * type name; if we see this, we issue a warning and fix up the
+ * pg_proc entry.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
{
/* Found, but must complain and fix the pg_proc entry */
ereport(WARNING,
- (errmsg("changing argument type of function %s from \"opaque\" to %s",
- NameListToString(procname), format_type_be(typeOid))));
+ (errmsg("changing argument type of function %s from \"opaque\" to %s",
+ NameListToString(procname), format_type_be(typeOid))));
SetFunctionArgType(procOid, 0, typeOid);
/*
Oid procOid;
/*
- * Analyze functions always take one INTERNAL argument and return bool.
+ * Analyze functions always take one INTERNAL argument and return
+ * bool.
*/
MemSet(argList, 0, FUNC_MAX_ARGS * sizeof(Oid));
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type \"boolean\"",
- NameListToString(procname))));
+ errmsg("type analyze function %s must return type \"boolean\"",
+ NameListToString(procname))));
return procOid;
}
errmsg("composite type must have at least one attribute")));
/*
- * now set the parameters for keys/inheritance etc. All of these
- * are uninteresting for composite types...
+ * now set the parameters for keys/inheritance etc. All of these are
+ * uninteresting for composite types...
*/
createStmt->relation = (RangeVar *) typevar;
createStmt->tableElts = coldeflist;
ereport(ERROR,
(errcode(ERRCODE_NOT_NULL_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains null values",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
- RelationGetRelationName(testrel))));
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
+ RelationGetRelationName(testrel))));
}
}
heap_endscan(scan);
if (IsA(newConstraint, FkConstraint))
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("foreign key constraints not possible for domains")));
+ errmsg("foreign key constraints not possible for domains")));
/* otherwise it should be a plain Constraint */
if (!IsA(newConstraint, Constraint))
case CONSTR_UNIQUE:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("unique constraints not possible for domains")));
+ errmsg("unique constraints not possible for domains")));
break;
case CONSTR_PRIMARY:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("primary key constraints not possible for domains")));
+ errmsg("primary key constraints not possible for domains")));
break;
case CONSTR_ATTR_DEFERRABLE:
ereport(ERROR,
(errcode(ERRCODE_CHECK_VIOLATION),
errmsg("column \"%s\" of table \"%s\" contains values that violate the new constraint",
- NameStr(tupdesc->attrs[attnum - 1]->attname),
+ NameStr(tupdesc->attrs[attnum - 1]->attname),
RelationGetRelationName(testrel))));
}
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * If it's a composite type, we need to check that it really is a
- * free-standing composite type, and not a table's underlying type.
- * We want people to use ALTER TABLE not ALTER TYPE for that case.
+ * If it's a composite type, we need to check that it really is a
+ * free-standing composite type, and not a table's underlying type. We
+ * want people to use ALTER TABLE not ALTER TYPE for that case.
*/
if (typTup->typtype == 'c' && get_rel_relkind(typTup->typrelid) != 'c')
ereport(ERROR,
errmsg("\"%s\" is a table's row type",
TypeNameToString(typename))));
- /*
+ /*
* If the new owner is the same as the existing owner, consider the
* command to have succeeded. This is for dump restoration purposes.
*/
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to change owner")));
- /* Modify the owner --- okay to scribble on typTup because it's a copy */
+ /*
+ * Modify the owner --- okay to scribble on typTup because it's a
+ * copy
+ */
typTup->typowner = newOwnerSysId;
simple_heap_update(rel, &tup->t_self, tup);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.143 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/user.c,v 1.144 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* The need-to-update-files flags are a pair of TransactionIds that show what
- * level of the transaction tree requested the update. To register an update,
+ * level of the transaction tree requested the update. To register an update,
* the transaction saves its own TransactionId in the flag, unless the value
* was already set to a valid TransactionId. If it aborts and the value is its
- * TransactionId, it resets the value to InvalidTransactionId. If it commits,
+ * TransactionId, it resets the value to InvalidTransactionId. If it commits,
* it changes the value to its parent's TransactionId. This way the value is
* propagated up to the topmost transaction, which will update the files if a
* valid TransactionId is detected.
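The rule described in the comment above can be modeled in a few lines: a subtransaction registers a request by stamping the flag with its own id (unless already set), withdraws it on abort, and hands it up to its parent on commit. This toy sketch uses plain unsigned ints standing in for TransactionIds (0 meaning invalid) and is only an illustration of the propagation rule, not the backend's implementation.

/* Sketch only: propagation of a "files need updating" request flag. */
#define INVALID_XID 0

static unsigned int files_update_xid = INVALID_XID;

void
register_update(unsigned int my_xid)
{
	/* remember the requesting level, unless someone already did */
	if (files_update_xid == INVALID_XID)
		files_update_xid = my_xid;
}

void
at_subabort(unsigned int my_xid)
{
	/* only undo a request made at our own level */
	if (files_update_xid == my_xid)
		files_update_xid = INVALID_XID;
}

void
at_subcommit(unsigned int my_xid, unsigned int parent_xid)
{
	/* hand the request up; the parent repeats this at its own end */
	if (files_update_xid == my_xid)
		files_update_xid = parent_xid;
}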
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to temporary file \"%s\": %m", tempname)));
+ errmsg("could not write to temporary file \"%s\": %m", tempname)));
/*
* Read pg_group and write the file. Note we use SnapshotSelf to
if (fp == NULL)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to temporary file \"%s\": %m", tempname)));
+ errmsg("could not write to temporary file \"%s\": %m", tempname)));
/*
* Read pg_shadow and write the file. Note we use SnapshotSelf to
errmsg("user \"%s\" does not exist", stmt->user)));
if (!(superuser() ||
- ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
+ ((Form_pg_shadow) GETSTRUCT(oldtuple))->usesysid == GetUserId()))
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied")));
char repl_null[Natts_pg_shadow];
char repl_repl[Natts_pg_shadow];
int i;
-
+
/* ExclusiveLock because we need to update the password file */
rel = heap_openr(ShadowRelationName, ExclusiveLock);
dsc = RelationGetDescr(rel);
oldtuple = SearchSysCache(SHADOWNAME,
- CStringGetDatum(oldname),
- 0, 0, 0);
+ CStringGetDatum(oldname),
+ 0, 0, 0);
if (!HeapTupleIsValid(oldtuple))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
repl_repl[Anum_pg_shadow_usename - 1] = 'r';
repl_val[Anum_pg_shadow_usename - 1] = DirectFunctionCall1(namein,
- CStringGetDatum(newname));
+ CStringGetDatum(newname));
repl_null[Anum_pg_shadow_usename - 1] = ' ';
datum = heap_getattr(oldtuple, Anum_pg_shadow_passwd, dsc, &isnull);
/* MD5 uses the username as salt, so just clear it on a rename */
repl_repl[Anum_pg_shadow_passwd - 1] = 'r';
repl_null[Anum_pg_shadow_passwd - 1] = 'n';
-
+
ereport(NOTICE,
- (errmsg("MD5 password cleared because of user rename")));
+ (errmsg("MD5 password cleared because of user rename")));
}
-
+
newtuple = heap_modifytuple(oldtuple, rel, repl_val, repl_null, repl_repl);
simple_heap_update(rel, &oldtuple->t_self, newtuple);
-
+
CatalogUpdateIndexes(rel, newtuple);
ReleaseSysCache(oldtuple);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.288 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.289 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* As these variables always appear together, we put them into one struct
* and pull initialization and cleanup into separate routines.
* ExecContext is used by repair_frag() and move_xxx_tuple(). More
- * accurately: It is *used* only in move_xxx_tuple(), but because this
+ * accurately: It is *used* only in move_xxx_tuple(), but because this
* routine is called many times, we initialize the struct just once in
* repair_frag() and pass it on to move_xxx_tuple().
*/
ec->estate = CreateExecutorState();
ec->resultRelInfo = makeNode(ResultRelInfo);
- ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
+ ec->resultRelInfo->ri_RangeTableIndex = 1; /* dummy */
ec->resultRelInfo->ri_RelationDesc = rel;
- ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
+ ec->resultRelInfo->ri_TrigDesc = NULL; /* we don't fire triggers */
ExecOpenIndices(ec->resultRelInfo);
ExecCloseIndices(ec->resultRelInfo);
FreeExecutorState(ec->estate);
}
+
/*
* End of ExecContext Implementation
*----------------------------------------------------------------------
VacPageList vacuum_pages, VacPageList fraged_pages,
int nindexes, Relation *Irel);
static void move_chain_tuple(Relation rel,
- Buffer old_buf, Page old_page, HeapTuple old_tup,
- Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
- ExecContext ec, ItemPointer ctid, bool cleanVpd);
+ Buffer old_buf, Page old_page, HeapTuple old_tup,
+ Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
+ ExecContext ec, ItemPointer ctid, bool cleanVpd);
static void move_plain_tuple(Relation rel,
- Buffer old_buf, Page old_page, HeapTuple old_tup,
- Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
- ExecContext ec);
+ Buffer old_buf, Page old_page, HeapTuple old_tup,
+ Buffer dst_buf, Page dst_page, VacPage dst_vacpage,
+ ExecContext ec);
static void update_hint_bits(Relation rel, VacPageList fraged_pages,
- int num_fraged_pages, BlockNumber last_move_dest_block,
- int num_moved);
+ int num_fraged_pages, BlockNumber last_move_dest_block,
+ int num_moved);
static void vacuum_heap(VRelStats *vacrelstats, Relation onerel,
VacPageList vacpagelist);
static void vacuum_page(Relation onerel, Buffer buffer, VacPage vacpage);
* Furthermore, the forced commit that occurs before truncating the
* relation's file would have the effect of committing the rest of the
* user's transaction too, which would certainly not be the desired
- * behavior. (This only applies to VACUUM FULL, though. We could
- * in theory run lazy VACUUM inside a transaction block, but we choose
- * to disallow that case because we'd rather commit as soon as possible
- * after finishing the vacuum. This is mainly so that we can let go the
- * AccessExclusiveLock that we may be holding.)
+ * behavior. (This only applies to VACUUM FULL, though. We could in
+ * theory run lazy VACUUM inside a transaction block, but we choose to
+ * disallow that case because we'd rather commit as soon as possible
+ * after finishing the vacuum. This is mainly so that we can let go
+ * the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
in_outer_xact = false;
}
else
- {
in_outer_xact = IsInTransactionChain((void *) vacstmt);
- }
/*
* Send info about dead objects to the statistics collector
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit
- * XIDs, so that we can record these values at the end of the
- * VACUUM. Note that individual tables may well be processed
- * with newer values, but we can guarantee that no
- * (non-shared) relations are processed with older ones.
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
+ * so that we can record these values at the end of the VACUUM.
+ * Note that individual tables may well be processed with newer
+ * values, but we can guarantee that no (non-shared) relations are
+ * processed with older ones.
*
- * It is okay to record non-shared values in pg_database, even
- * though we may vacuum shared relations with older cutoffs,
- * because only the minimum of the values present in
- * pg_database matters. We can be sure that shared relations
- * have at some time been vacuumed with cutoffs no worse than
- * the global minimum; for, if there is a backend in some
- * other DB with xmin = OLDXMIN that's determining the cutoff
- * with which we vacuum shared relations, it is not possible
- * for that database to have a cutoff newer than OLDXMIN
- * recorded in pg_database.
+ * It is okay to record non-shared values in pg_database, even though
+ * we may vacuum shared relations with older cutoffs, because only
+ * the minimum of the values present in pg_database matters. We
+ * can be sure that shared relations have at some time been
+ * vacuumed with cutoffs no worse than the global minimum; for, if
+ * there is a backend in some other DB with xmin = OLDXMIN that's
+ * determining the cutoff with which we vacuum shared relations,
+ * it is not possible for that database to have a cutoff newer
+ * than OLDXMIN recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we
- * can release locks as soon as possible. (We could possibly use the
+ * For VACUUM (with or without ANALYZE): always do so, so that we can
+ * release locks as soon as possible. (We could possibly use the
* outer transaction for a one-table VACUUM, but handling TOAST tables
* would be problematic.)
*
* locks sooner.
*/
if (vacstmt->vacuum)
- {
use_own_xacts = true;
- }
else
{
Assert(vacstmt->analyze);
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * vacuum_rel expects to be entered with no transaction active; it will
- * start and commit its own transaction. But we are called by an SQL
- * command, and so we are executing inside a transaction already. We
- * commit the transaction started in PostgresMain() here, and start
+ * vacuum_rel expects to be entered with no transaction active; it
+ * will start and commit its own transaction. But we are called by an
+ * SQL command, and so we are executing inside a transaction already.
+ * We commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
*/
if (vacstmt->vacuum)
{
if (!vacuum_rel(relid, vacstmt, RELKIND_RELATION))
- all_rels = false; /* forget about updating dbstats */
+ all_rels = false; /* forget about updating dbstats */
}
if (vacstmt->analyze)
{
MemoryContext old_context = NULL;
/*
- * If using separate xacts, start one for analyze. Otherwise,
- * we can use the outer transaction, but we still need to call
- * analyze_rel in a memory context that will be cleaned up on
- * return (else we leak memory while processing multiple
- * tables).
+ * If using separate xacts, start one for analyze.
+ * Otherwise, we can use the outer transaction, but we
+ * still need to call analyze_rel in a memory context that
+ * will be cleaned up on return (else we leak memory while
+ * processing multiple tables).
*/
if (use_own_xacts)
{
StartTransactionCommand();
- SetQuerySnapshot(); /* might be needed for functions
- * in indexes */
+ SetQuerySnapshot(); /* might be needed for functions
+ * in indexes */
}
else
old_context = MemoryContextSwitchTo(anl_context);
* indexes */
/*
- * Tell the cache replacement strategy that vacuum is causing
- * all following IO
+ * Tell the cache replacement strategy that vacuum is causing all
+ * following IO
*/
StrategyHintVacuum(true);
}
/*
- * Check that it's a plain table; we used to do this in
- * get_rel_oids() but seems safer to check after we've locked the
- * relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids()
+ * but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
if (PageIsNew(page))
{
- VacPage vacpagecopy;
+ VacPage vacpagecopy;
ereport(WARNING,
(errmsg("relation \"%s\" page %u is uninitialized --- fixing",
if (PageIsEmpty(page))
{
- VacPage vacpagecopy;
+ VacPage vacpagecopy;
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
if (do_reap || do_frag)
{
- VacPage vacpagecopy = copy_vac_page(vacpage);
+ VacPage vacpagecopy = copy_vac_page(vacpage);
+
if (do_reap)
vpage_insert(vacuum_pages, vacpagecopy);
if (do_frag)
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
- "Nonremovable row versions range from %lu to %lu bytes long.\n"
+ "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable row versions) is %.0f bytes.\n"
+ "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
BlockNumber last_move_dest_block = 0,
last_vacuum_block;
Page dst_page = NULL;
- ExecContextData ec;
+ ExecContextData ec;
VacPageListData Nvacpagelist;
VacPage dst_vacpage = NULL,
last_vacuum_page,
blkno > last_move_dest_block;
blkno--)
{
- Buffer buf;
- Page page;
- OffsetNumber offnum,
- maxoff;
- bool isempty,
- dowrite,
- chain_tuple_moved;
+ Buffer buf;
+ Page page;
+ OffsetNumber offnum,
+ maxoff;
+ bool isempty,
+ dowrite,
+ chain_tuple_moved;
vacuum_delay_point();
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- Size tuple_len;
- HeapTupleData tuple;
- ItemId itemid = PageGetItemId(page, offnum);
+ Size tuple_len;
+ HeapTupleData tuple;
+ ItemId itemid = PageGetItemId(page, offnum);
if (!ItemIdIsUsed(itemid))
continue;
/*
* VACUUM FULL has an exclusive lock on the relation. So
* normally no other transaction can have pending INSERTs or
- * DELETEs in this relation. A tuple is either
- * (a) a tuple in a system catalog, inserted or deleted by
- * a not yet committed transaction or
- * (b) dead (XMIN_INVALID or XMAX_COMMITTED) or
- * (c) inserted by a committed xact (XMIN_COMMITTED) or
- * (d) moved by the currently running VACUUM.
- * In case (a) we wouldn't be in repair_frag() at all.
+ * DELETEs in this relation. A tuple is either (a) a tuple in
+ * a system catalog, inserted or deleted by a not yet
+ * committed transaction or (b) dead (XMIN_INVALID or
+ * XMAX_COMMITTED) or (c) inserted by a committed xact
+ * (XMIN_COMMITTED) or (d) moved by the currently running
+ * VACUUM. In case (a) we wouldn't be in repair_frag() at all.
* In case (b) we cannot be here, because scan_heap() has
- * already marked the item as unused, see continue above.
- * Case (c) is what normally is to be expected.
- * Case (d) is only possible, if a whole tuple chain has been
- * moved while processing this or a higher numbered block.
+ * already marked the item as unused, see continue above. Case
+ * (c) is what normally is to be expected. Case (d) is only
+ * possible if a whole tuple chain has been moved while
+ * processing this or a higher numbered block.
*/
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
/*
- * There cannot be another concurrently running VACUUM. If
- * the tuple had been moved in by a previous VACUUM, the
- * visibility check would have set XMIN_COMMITTED. If the
- * tuple had been moved in by the currently running VACUUM,
- * the loop would have been terminated. We had
+ * There cannot be another concurrently running VACUUM.
+ * If the tuple had been moved in by a previous VACUUM,
+ * the visibility check would have set XMIN_COMMITTED. If
+ * the tuple had been moved in by the currently running
+ * VACUUM, the loop would have been terminated. We had
* elog(ERROR, ...) here, but as we are testing for a
- * can't-happen condition, Assert() seems more appropriate.
+ * can't-happen condition, Assert() seems more
+ * appropriate.
*/
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));
* moved while cleaning this page or some previous one.
*/
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);
+
/*
* MOVED_OFF by another VACUUM would have caused the
* visibility check to set XMIN_COMMITTED or XMIN_INVALID.
/* Can't we Assert(keep_tuples > 0) here? */
if (keep_tuples == 0)
continue;
- if (chain_tuple_moved) /* some chains was moved
- * while */
- { /* cleaning this page */
+                if (chain_tuple_moved) /* some chains were moved while */
+ { /* cleaning this page */
Assert(vacpage->offsets_free > 0);
for (i = 0; i < vacpage->offsets_free; i++)
{
if (vacpage->offsets[i] == offnum)
break;
}
- if (i >= vacpage->offsets_free) /* not found */
+ if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = offnum;
keep_tuples--;
off <= maxoff;
off = OffsetNumberNext(off))
{
- ItemId itemid = PageGetItemId(page, off);
- HeapTupleHeader htup;
+ ItemId itemid = PageGetItemId(page, off);
+ HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;
+
/*
- ** See comments in the walk-along-page loop above, why we
- ** have Asserts here instead of if (...) elog(ERROR).
- */
+                * See comments in the walk-along-page loop above, why we
+                * have Asserts here instead of if (...) elog(ERROR).
+                */
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
if (vacpage->offsets[i] == off)
break;
}
- if (i >= vacpage->offsets_free) /* not found */
+ if (i >= vacpage->offsets_free) /* not found */
{
vacpage->offsets[vacpage->offsets_free++] = off;
Assert(keep_tuples > 0);
*/
update_hint_bits(onerel, fraged_pages, num_fraged_pages,
last_move_dest_block, num_moved);
-
+
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
* processing that occurs below.
*/
ereport(elevel,
- (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
- RelationGetRelationName(onerel),
- num_moved, nblocks, blkno),
- errdetail("%s",
- vac_show_rusage(&ru0))));
+ (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s",
+ vac_show_rusage(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
*vpleft = *vpright;
*vpright = vpsave;
}
+
/*
* keep_tuples is the number of tuples that have been moved
* off a page during chain moves but not been scanned over
if (vacpage->blkno == (blkno - 1) &&
vacpage->offsets_free > 0)
{
- Buffer buf;
- Page page;
- OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
- OffsetNumber offnum,
- maxoff;
- int uncnt;
- int num_tuples = 0;
+ Buffer buf;
+ Page page;
+ OffsetNumber unused[BLCKSZ / sizeof(OffsetNumber)];
+ OffsetNumber offnum,
+ maxoff;
+ int uncnt;
+ int num_tuples = 0;
buf = ReadBuffer(onerel, vacpage->blkno);
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
offnum <= maxoff;
offnum = OffsetNumberNext(offnum))
{
- ItemId itemid = PageGetItemId(page, offnum);
+ ItemId itemid = PageGetItemId(page, offnum);
HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
continue;
/*
- ** See comments in the walk-along-page loop above, why we
- ** have Asserts here instead of if (...) elog(ERROR).
- */
+                    * See comments in the walk-along-page loop above, why we
+                    * have Asserts here instead of if (...) elog(ERROR).
+                    */
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
Assert(HeapTupleHeaderGetXvac(htup) == myXID);
ExecContext ec, ItemPointer ctid, bool cleanVpd)
{
TransactionId myXID = GetCurrentTransactionId();
- HeapTupleData newtup;
- OffsetNumber newoff;
- ItemId newitemid;
- Size tuple_len = old_tup->t_len;
+ HeapTupleData newtup;
+ OffsetNumber newoff;
+ ItemId newitemid;
+ Size tuple_len = old_tup->t_len;
heap_copytuple_with_tuple(old_tup, &newtup);
START_CRIT_SECTION();
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID |
- HEAP_MOVED_IN);
+ HEAP_XMIN_INVALID |
+ HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
/*
* If this page was not used before - clean it.
*
- * NOTE: a nasty bug used to lurk here. It is possible
- * for the source and destination pages to be the same
- * (since this tuple-chain member can be on a page
- * lower than the one we're currently processing in
- * the outer loop). If that's true, then after
- * vacuum_page() the source tuple will have been
- * moved, and tuple.t_data will be pointing at
- * garbage. Therefore we must do everything that uses
+ * NOTE: a nasty bug used to lurk here. It is possible for the source
+ * and destination pages to be the same (since this tuple-chain member
+ * can be on a page lower than the one we're currently processing in
+ * the outer loop). If that's true, then after vacuum_page() the
+ * source tuple will have been moved, and tuple.t_data will be
+ * pointing at garbage. Therefore we must do everything that uses
* old_tup->t_data BEFORE this step!!
*
- * This path is different from the other callers of
- * vacuum_page, because we have already incremented
- * the vacpage's offsets_used field to account for the
- * tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong.
- * But since that's a good debugging check for all
- * other callers, we work around it here rather than
- * remove it.
+ * This path is different from the other callers of vacuum_page, because
+ * we have already incremented the vacpage's offsets_used field to
+ * account for the tuple(s) we expect to move onto the page. Therefore
+ * vacuum_page's check for offsets_used == 0 is wrong. But since
+ * that's a good debugging check for all other callers, we work around
+ * it here rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
- int sv_offsets_used = dst_vacpage->offsets_used;
+ int sv_offsets_used = dst_vacpage->offsets_used;
dst_vacpage->offsets_used = 0;
vacuum_page(rel, dst_buf, dst_vacpage);
}
/*
- * Update the state of the copied tuple, and store it
- * on the destination page.
+ * Update the state of the copied tuple, and store it on the
+ * destination page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
if (newoff == InvalidOffsetNumber)
{
elog(PANIC, "failed to add item with len = %lu to page %u while moving tuple chain",
- (unsigned long) tuple_len, dst_vacpage->blkno);
+ (unsigned long) tuple_len, dst_vacpage->blkno);
}
newitemid = PageGetItemId(dst_page, newoff);
pfree(newtup.t_data);
else
{
/*
- * No XLOG record, but still need to flag that XID
- * exists on disk
+ * No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
END_CRIT_SECTION();
/*
- * Set new tuple's t_ctid pointing to itself for last
- * tuple in chain, and to next tuple in chain
- * otherwise.
+ * Set new tuple's t_ctid pointing to itself for last tuple in chain,
+ * and to next tuple in chain otherwise.
*/
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid))
ExecContext ec)
{
TransactionId myXID = GetCurrentTransactionId();
- HeapTupleData newtup;
- OffsetNumber newoff;
- ItemId newitemid;
- Size tuple_len = old_tup->t_len;
+ HeapTupleData newtup;
+ OffsetNumber newoff;
+ ItemId newitemid;
+ Size tuple_len = old_tup->t_len;
/* copy tuple */
heap_copytuple_with_tuple(old_tup, &newtup);
/*
* register invalidation of source tuple in catcaches.
*
- * (Note: we do not need to register the copied tuple, because we
- * are not changing the tuple contents and so there cannot be
- * any need to flush negative catcache entries.)
+ * (Note: we do not need to register the copied tuple, because we are not
+ * changing the tuple contents and so there cannot be any need to
+ * flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
* Mark old tuple as MOVED_OFF by me.
*/
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
- HEAP_XMIN_INVALID |
- HEAP_MOVED_IN);
+ HEAP_XMIN_INVALID |
+ HEAP_MOVED_IN);
old_tup->t_data->t_infomask |= HEAP_MOVED_OFF;
HeapTupleHeaderSetXvac(old_tup->t_data, myXID);
else
{
/*
- * No XLOG record, but still need to flag that XID exists
- * on disk
+ * No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true;
}
END_CRIT_SECTION();
dst_vacpage->free = ((PageHeader) dst_page)->pd_upper -
- ((PageHeader) dst_page)->pd_lower;
+ ((PageHeader) dst_page)->pd_lower;
LockBuffer(dst_buf, BUFFER_LOCK_UNLOCK);
LockBuffer(old_buf, BUFFER_LOCK_UNLOCK);
{
int checked_moved = 0;
int i;
- VacPage *curpage;
+ VacPage *curpage;
for (i = 0, curpage = fraged_pages->pagedesc;
i < num_fraged_pages;
i++, curpage++)
{
- Buffer buf;
- Page page;
- OffsetNumber max_offset;
- OffsetNumber off;
- int num_tuples = 0;
+ Buffer buf;
+ Page page;
+ OffsetNumber max_offset;
+ OffsetNumber off;
+ int num_tuples = 0;
vacuum_delay_point();
off <= max_offset;
off = OffsetNumberNext(off))
{
- ItemId itemid = PageGetItemId(page, off);
- HeapTupleHeader htup;
+ ItemId itemid = PageGetItemId(page, off);
+ HeapTupleHeader htup;
if (!ItemIdIsUsed(itemid))
continue;
htup = (HeapTupleHeader) PageGetItem(page, itemid);
if (htup->t_infomask & HEAP_XMIN_COMMITTED)
continue;
+
/*
- * See comments in the walk-along-page loop above, why we
- * have Asserts here instead of if (...) elog(ERROR). The
+ * See comments in the walk-along-page loop above, why we have
+ * Asserts here instead of if (...) elog(ERROR). The
* difference here is that we may see MOVED_IN.
*/
Assert(htup->t_infomask & HEAP_MOVED);
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
if (VacuumCostActive && !InterruptPending &&
VacuumCostBalance >= VacuumCostLimit)
{
- int msec;
+ int msec;
msec = VacuumCostDelay * VacuumCostBalance / VacuumCostLimit;
if (msec > VacuumCostDelay * 4)
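For readers tracing the cost-based vacuum delay in the fragment above: the sleep time grows in proportion to how far VacuumCostBalance has overshot VacuumCostLimit, and is clamped to four times VacuumCostDelay. The following is a minimal standalone sketch of just that arithmetic; the function name and demo values are invented for illustration, and the actual sleep and balance reset that follow in the real code are not reproduced here.

    #include <stdio.h>

    /* Illustrative only: mirrors the delay formula in the fragment above. */
    static int
    cost_delay_msec(int cost_delay, int cost_balance, int cost_limit)
    {
        int     msec = cost_delay * cost_balance / cost_limit;

        if (msec > cost_delay * 4)      /* clamp, as in the original test */
            msec = cost_delay * 4;
        return msec;
    }

    int
    main(void)
    {
        /* base delay 10 ms, limit 200, balance 350 -> 10*350/200 = 17 ms */
        printf("%d\n", cost_delay_msec(10, 350, 200));
        /* a large overshoot is clamped to 4x the base delay: 40 ms */
        printf("%d\n", cost_delay_msec(10, 5000, 200));
        return 0;
    }

So a backend that has accumulated a little extra cost sleeps only a little longer, while a runaway balance cannot stall vacuum for more than four base delays at a time.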
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.44 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.45 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
pfree(stats);
}
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
pfree(stats);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.100 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/variable.c,v 1.101 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for parameter \"datestyle\"")));
+ errmsg("invalid list syntax for parameter \"datestyle\"")));
return NULL;
}
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("unrecognized \"datestyle\" key word: \"%s\"",
- tok)));
+ errmsg("unrecognized \"datestyle\" key word: \"%s\"",
+ tok)));
ok = false;
break;
}
*
* During GUC initialization, since the timezone library isn't
* set up yet, pg_get_current_timezone will return NULL and we
- * will leave the setting as UNKNOWN. If this isn't overridden
- * from the config file then pg_timezone_initialize() will
- * eventually select a default value from the environment.
+ * will leave the setting as UNKNOWN. If this isn't
+ * overridden from the config file then
+ * pg_timezone_initialize() will eventually select a default
+ * value from the environment.
*/
const char *curzone = pg_get_current_timezone();
* Otherwise assume it is a timezone name.
*
* We have to actually apply the change before we can have any
- * hope of checking it. So, save the old value in case we have
- * to back out. We have to copy since pg_get_current_timezone
- * returns a pointer to its static state.
+ * hope of checking it. So, save the old value in case we
+ * have to back out. We have to copy since
+ * pg_get_current_timezone returns a pointer to its static
+ * state.
*
- * This would all get a lot simpler if the TZ library had a better
- * API that would let us look up and test a timezone name without
- * making it the default.
+ * This would all get a lot simpler if the TZ library had a
+ * better API that would let us look up and test a timezone
+ * name without making it the default.
*/
const char *cur_tz;
char *save_tz;
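The comment above describes a try-then-back-out pattern forced by the TZ library: copy the current setting first (the getter hands back a pointer to its static state), apply the new value so it can actually be checked, and restore the saved copy if the check fails. Below is a generic sketch of that pattern written against the POSIX environment API rather than the PostgreSQL wrappers; tz_is_acceptable is a hypothetical stand-in for the validity checks the real assign_timezone performs.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    /* Hypothetical check; stands in for the tests the real code performs. */
    static bool
    tz_is_acceptable(void)
    {
        tzset();
        return getenv("TZ") != NULL;
    }

    /* Try to switch TZ; restore the saved copy if the new value is rejected. */
    static bool
    try_set_timezone(const char *newtz)
    {
        const char *cur = getenv("TZ");
        char       *save = cur ? strdup(cur) : NULL;    /* must copy: static storage */
        bool        ok;

        setenv("TZ", newtz, 1);
        ok = tz_is_acceptable();
        if (!ok)
        {
            if (save)
                setenv("TZ", save, 1);      /* back out */
            else
                unsetenv("TZ");
            tzset();
        }
        free(save);
        return ok;
    }

    int
    main(void)
    {
        printf("UTC accepted: %d\n", (int) try_set_timezone("UTC"));
        return 0;
    }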
else
{
/*
- * TZ library wasn't initialized yet. Annoyingly, we will
- * come here during startup because guc-file.l checks
- * the value with doit = false before actually applying.
- * The best approach seems to be as follows:
+ * TZ library wasn't initialized yet. Annoyingly, we
+ * will come here during startup because guc-file.l
+ * checks the value with doit = false before actually
+ * applying. The best approach seems to be as follows:
*
* 1. known && acceptable: leave the setting in place,
* since we'll apply it soon anyway. This is mainly
- * so that any log messages printed during this interval
- * are timestamped with the user's requested timezone.
+ * so that any log messages printed during this
+ * interval are timestamped with the user's requested
+ * timezone.
*
- * 2. known && !acceptable: revert to GMT for lack of
- * any better idea. (select_default_timezone() may get
+ * 2. known && !acceptable: revert to GMT for lack of any
+ * better idea. (select_default_timezone() may get
* called later to undo this.)
*
- * 3. !known: no need to do anything since TZ library
- * did not change its state.
+ * 3. !known: no need to do anything since TZ library did
+ * not change its state.
*
* Again, this should all go away sometime soon.
*/
const char *
show_timezone(void)
{
- const char *tzn;
+ const char *tzn;
if (HasCTZSet)
{
{
if (doit && source >= PGC_S_INTERACTIVE)
{
- if (SerializableSnapshot != NULL)
- ereport(ERROR,
- (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
- if (IsSubTransaction())
- ereport(ERROR,
- (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
- errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
+ if (SerializableSnapshot != NULL)
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("SET TRANSACTION ISOLATION LEVEL must be called before any query")));
+ if (IsSubTransaction())
+ ereport(ERROR,
+ (errcode(ERRCODE_ACTIVE_SQL_TRANSACTION),
+ errmsg("SET TRANSACTION ISOLATION LEVEL must not be called in a subtransaction")));
}
if (strcmp(value, "serializable") == 0)
* limit on names, so we can tell whether we're being passed an initial
* username or a saved/restored value.
*/
-extern char *session_authorization_string; /* in guc.c */
+extern char *session_authorization_string; /* in guc.c */
const char *
assign_session_authorization(const char *value, bool doit, GucSource source)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.84 2004/08/29 04:12:30 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/view.c,v 1.85 2004/08/29 05:06:41 momjian Exp $
*
*-------------------------------------------------------------------------
*/
newattr->atttypmod != oldattr->atttypmod)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TABLE_DEFINITION),
- errmsg("cannot change data type of view column \"%s\"",
- NameStr(oldattr->attname))));
+ errmsg("cannot change data type of view column \"%s\"",
+ NameStr(oldattr->attname))));
/* We can ignore the remaining attributes of an attribute... */
}
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.80 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execAmi.c,v 1.81 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* needs access to variables of the current outer tuple. (The handling of
* this parameter is currently pretty inconsistent: some callers pass NULL
* and some pass down their parent's value; so don't rely on it in other
- * situations. It'd probably be better to remove the whole thing and use
+ * situations. It'd probably be better to remove the whole thing and use
* the generalized parameter mechanism instead.)
*/
void
/* If we have changed parameters, propagate that info */
if (node->chgParam != NULL)
{
- ListCell *l;
+ ListCell *l;
foreach(l, node->initPlan)
{
{
/*
* At a table scan node, we check whether ExecAssignScanProjectionInfo
- * decided to do projection or not. Most non-scan nodes always project
- * and so we can return "false" immediately. For nodes that don't
- * project but just pass up input tuples, we have to recursively
+ * decided to do projection or not. Most non-scan nodes always
+ * project and so we can return "false" immediately. For nodes that
+ * don't project but just pass up input tuples, we have to recursively
* examine the input plan node.
*
- * Note: Hash and Material are listed here because they sometimes
- * return an original input tuple, not a copy. But Sort and SetOp
- * never return an original tuple, so they can be treated like
- * projecting nodes.
+ * Note: Hash and Material are listed here because they sometimes return
+ * an original input tuple, not a copy. But Sort and SetOp never
+ * return an original tuple, so they can be treated like projecting
+ * nodes.
*/
switch (nodeTag(node))
{
- /* Table scan nodes */
+ /* Table scan nodes */
case T_SeqScanState:
case T_IndexScanState:
case T_TidScanState:
return true;
break;
- /* Non-projecting nodes */
+ /* Non-projecting nodes */
case T_HashState:
case T_MaterialState:
case T_UniqueState:
return ExecMayReturnRawTuples(node->lefttree);
case T_AppendState:
- {
- AppendState *appendstate = (AppendState *) node;
- int j;
-
- for (j = 0; j < appendstate->as_nplans; j++)
{
- if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
- return true;
+ AppendState *appendstate = (AppendState *) node;
+ int j;
+
+ for (j = 0; j < appendstate->as_nplans; j++)
+ {
+ if (ExecMayReturnRawTuples(appendstate->appendplans[j]))
+ return true;
+ }
+ break;
}
- break;
- }
- /* All projecting node types come here */
+ /* All projecting node types come here */
default:
break;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.10 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execGrouping.c,v 1.11 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static TupleHashTable CurTupleHashTable = NULL;
static uint32 TupleHashTableHash(const void *key, Size keysize);
-static int TupleHashTableMatch(const void *key1, const void *key2,
- Size keysize);
+static int TupleHashTableMatch(const void *key1, const void *key2,
+ Size keysize);
/*****************************************************************************
Assert(entrysize >= sizeof(TupleHashEntryData));
hashtable = (TupleHashTable) MemoryContextAlloc(tablecxt,
- sizeof(TupleHashTableData));
+ sizeof(TupleHashTableData));
hashtable->numCols = numCols;
hashtable->keyColIdx = keyColIdx;
hash_ctl.hcxt = tablecxt;
hashtable->hashtab = hash_create("TupleHashTable", (long) nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_COMPARE | HASH_CONTEXT);
if (hashtable->hashtab == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
/*
* Set up data needed by hash and match functions
*
- * We save and restore CurTupleHashTable just in case someone manages
- * to invoke this code re-entrantly.
+ * We save and restore CurTupleHashTable just in case someone manages to
+ * invoke this code re-entrantly.
*/
hashtable->tupdesc = tupdesc;
saveCurHT = CurTupleHashTable;
/*
* Zero any caller-requested space in the entry. (This zaps
- * the "key data" dynahash.c copied into the new entry, but
- * we don't care since we're about to overwrite it anyway.)
+ * the "key data" dynahash.c copied into the new entry, but we
+ * don't care since we're about to overwrite it anyway.)
*/
MemSet(entry, 0, hashtable->entrysize);
*
* The passed-in key is a pointer to a HeapTuple pointer -- this is either
* the firstTuple field of a TupleHashEntry struct, or the key value passed
- * to hash_search. We ignore the keysize.
+ * to hash_search. We ignore the keysize.
*
* CurTupleHashTable must be set before calling this, since dynahash.c
* doesn't provide any API that would let us get at the hashtable otherwise.
*
* Also, the caller must select an appropriate memory context for running
- * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
+ * the hash functions. (dynahash.c doesn't change CurrentMemoryContext.)
*/
static uint32
TupleHashTableHash(const void *key, Size keysize)
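The comments above describe a workaround for a callback API that passes only (key, keysize): the extra context the hash and match functions need is parked in a file-static variable, which is saved and restored around the call so that re-entrant use stays safe. The sketch below shows that save/restore pattern in isolation; the Ctx struct, CurCtx variable, and callback are invented for illustration and are not the execGrouping.c types.

    #include <stddef.h>
    #include <stdio.h>

    typedef struct Ctx { int nbytes; } Ctx;

    static Ctx *CurCtx = NULL;      /* plays the role of CurTupleHashTable */

    /* Callback with a fixed (key, keysize) signature; context comes from CurCtx. */
    static unsigned
    hash_cb(const void *key, size_t keysize)
    {
        const unsigned char *k = (const unsigned char *) key;
        unsigned    h = 0;
        int         i;

        (void) keysize;
        for (i = 0; i < CurCtx->nbytes; i++)
            h = h * 31 + k[i];
        return h;
    }

    static unsigned
    hash_with_ctx(Ctx *ctx, const void *key, size_t keysize)
    {
        Ctx        *save = CurCtx;  /* save in case of re-entrant use */
        unsigned    h;

        CurCtx = ctx;
        h = hash_cb(key, keysize);
        CurCtx = save;              /* restore the caller's context */
        return h;
    }

    int
    main(void)
    {
        Ctx             ctx = {4};
        unsigned char   key[4] = {1, 2, 3, 4};

        printf("%u\n", hash_with_ctx(&ctx, key, sizeof(key)));
        return 0;
    }

Saving and restoring, rather than simply clearing the variable afterwards, is what keeps a nested invocation from clobbering an outer one.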
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.42 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execJunk.c,v 1.43 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* dealing with a small number of attributes. for large tuples we just
* use palloc.
*
- * Note: we could use just one set of arrays if we were willing to
- * assume that the resno mapping is monotonic... I think it is, but
- * won't take the risk of breaking things right now.
+ * Note: we could use just one set of arrays if we were willing to assume
+ * that the resno mapping is monotonic... I think it is, but won't
+ * take the risk of breaking things right now.
*/
if (cleanLength > 64)
{
*/
for (i = 0; i < cleanLength; i++)
{
- int j = cleanMap[i] - 1;
+ int j = cleanMap[i] - 1;
values[i] = old_values[j];
nulls[i] = old_nulls[j];
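The loop above compacts a tuple through a resno map: cleanMap[i] holds the 1-based position, in the original tuple, of the i-th column that survives junk removal. A self-contained sketch of the same remapping, with made-up column values (the 'n' marker mirrors the null-flag convention used elsewhere in this diff):

    #include <stdio.h>

    int
    main(void)
    {
        /* 1-based positions of the non-junk columns in the original tuple */
        int     cleanMap[] = {1, 3, 4};
        int     cleanLength = 3;
        int     old_values[] = {10, 99, 20, 30};    /* column 2 is junk */
        char    old_nulls[] = {' ', ' ', 'n', ' '};
        int     values[3];
        char    nulls[3];
        int     i;

        for (i = 0; i < cleanLength; i++)
        {
            int     j = cleanMap[i] - 1;    /* convert to a 0-based index */

            values[i] = old_values[j];
            nulls[i] = old_nulls[j];
        }
        for (i = 0; i < cleanLength; i++)
            printf("%d%c ", values[i], nulls[i]);
        printf("\n");
        return 0;
    }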
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.235 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execMain.c,v 1.236 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Multiple result relations (due to inheritance)
* parseTree->resultRelations identifies them all
*/
- ResultRelInfo *resultRelInfo;
- ListCell *l;
+ ResultRelInfo *resultRelInfo;
+ ListCell *l;
numResultRelations = list_length(resultRelations);
resultRelInfos = (ResultRelInfo *)
/*
* Initialize the junk filter if needed. SELECT and INSERT queries
* need a filter if there are any junk attrs in the tlist. INSERT and
- * SELECT INTO also need a filter if the plan may return raw disk tuples
- * (else heap_insert will be scribbling on the source relation!).
- * UPDATE and DELETE always need a filter, since there's always a junk
- * 'ctid' attribute present --- no need to look first.
+ * SELECT INTO also need a filter if the plan may return raw disk
+ * tuples (else heap_insert will be scribbling on the source
+ * relation!). UPDATE and DELETE always need a filter, since there's
+ * always a junk 'ctid' attribute present --- no need to look first.
*/
{
bool junk_filter_needed = false;
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
&ctid,
estate->es_snapshot->curcid,
estate->es_crosscheck_snapshot,
- true /* wait for commit */);
+ true /* wait for commit */ );
switch (result)
{
case HeapTupleSelfUpdated:
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.167 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execQual.c,v 1.168 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAggref(AggrefExprState *aggref,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalVar(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalConst(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalParam(ExprState *exprstate, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static ExprDoneCond ExecEvalFuncArgs(FunctionCallInfo fcinfo,
List *argList, ExprContext *econtext);
static Datum ExecMakeFunctionResultNoSets(FuncExprState *fcache,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFunc(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOper(FuncExprState *fcache, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalDistinct(FuncExprState *fcache, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalScalarArrayOp(ScalarArrayOpExprState *sstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNot(BoolExprState *notclause, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalOr(BoolExprState *orExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalAnd(BoolExprState *andExpr, ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCase(CaseExprState *caseExpr, ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCaseTestExpr(ExprState *exprstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalArray(ArrayExprState *astate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRow(RowExprState *rstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoalesce(CoalesceExprState *coalesceExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullIf(FuncExprState *nullIfExpr,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalNullTest(GenericExprState *nstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalCoerceToDomainValue(ExprState *exprstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldSelect(FieldSelectState *fstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalFieldStore(FieldStoreState *fstate,
- ExprContext *econtext,
- bool *isNull, ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull, ExprDoneCond *isDone);
static Datum ExecEvalRelabelType(GenericExprState *exprstate,
ExprContext *econtext,
bool *isNull, ExprDoneCond *isDone);
*
* Note: for notational simplicity we declare these functions as taking the
* specific type of ExprState that they work on. This requires casting when
- * assigning the function pointer in ExecInitExpr. Be careful that the
+ * assigning the function pointer in ExecInitExpr. Be careful that the
* function signature is declared correctly, because the cast suppresses
* automatic checking!
*
isDone));
/*
- * If refexpr yields NULL, and it's a fetch, then result is NULL.
- * In the assignment case, we'll cons up something below.
+ * If refexpr yields NULL, and it's a fetch, then result is NULL. In
+ * the assignment case, we'll cons up something below.
*/
if (*isNull)
{
if (isDone && *isDone == ExprEndResult)
- return (Datum) NULL; /* end of set result */
+ return (Datum) NULL; /* end of set result */
if (!isAssignment)
return (Datum) NULL;
}
*
* XXX At some point we'll need to look into making the old value of
* the array element available via CaseTestExpr, as is done by
- * ExecEvalFieldStore. This is not needed now but will be needed
- * to support arrays of composite types; in an assignment to a field
- * of an array member, the parser would generate a FieldStore that
- * expects to fetch its input tuple via CaseTestExpr.
+ * ExecEvalFieldStore. This is not needed now but will be needed
+ * to support arrays of composite types; in an assignment to a
+ * field of an array member, the parser would generate a
+ * FieldStore that expects to fetch its input tuple via
+ * CaseTestExpr.
*/
sourceData = ExecEvalExpr(astate->refassgnexpr,
econtext,
return PointerGetDatum(array_source);
/*
- * For an assignment, if all the subscripts and the input expression
- * are non-null but the original array is null, then substitute an
- * empty (zero-dimensional) array and proceed with the assignment.
- * This only works for varlena arrays, though; for fixed-length
- * array types we punt and return the null input array.
+ * For an assignment, if all the subscripts and the input
+ * expression are non-null but the original array is null, then
+ * substitute an empty (zero-dimensional) array and proceed with
+ * the assignment. This only works for varlena arrays, though; for
+ * fixed-length array types we punt and return the null input
+ * array.
*/
if (*isNull)
{
- if (astate->refattrlength > 0) /* fixed-length array? */
+ if (astate->refattrlength > 0) /* fixed-length array? */
return PointerGetDatum(array_source);
array_source = construct_md_array(NULL, 0, NULL, NULL,
/*
* Get the slot and attribute number we want
*
- * The asserts check that references to system attributes only appear
- * at the level of a relation scan; at higher levels, system attributes
- * must be treated as ordinary variables (since we no longer have access
- * to the original tuple).
+ * The asserts check that references to system attributes only appear at
+ * the level of a relation scan; at higher levels, system attributes
+ * must be treated as ordinary variables (since we no longer have
+ * access to the original tuple).
*/
attnum = variable->varattno;
tuple_type = slot->ttc_tupleDescriptor;
/*
- * Some checks that are only applied for user attribute numbers
- * (bogus system attnums will be caught inside heap_getattr).
+ * Some checks that are only applied for user attribute numbers (bogus
+ * system attnums will be caught inside heap_getattr).
*/
if (attnum > 0)
{
tuple_type->attrs[attnum - 1] != NULL);
/*
- * If the attribute's column has been dropped, we force a NULL result.
- * This case should not happen in normal use, but it could happen if
- * we are executing a plan cached before the column was dropped.
+ * If the attribute's column has been dropped, we force a NULL
+ * result. This case should not happen in normal use, but it could
+ * happen if we are executing a plan cached before the column was
+ * dropped.
*/
if (tuple_type->attrs[attnum - 1]->attisdropped)
{
}
/*
- * This assert checks that the datatype the plan expects to get (as
- * told by our "variable" argument) is in fact the datatype of the
- * attribute being fetched (as seen in the current context, identified
- * by our "econtext" argument). Otherwise crashes are likely.
+ * This assert checks that the datatype the plan expects to get
+ * (as told by our "variable" argument) is in fact the datatype of
+ * the attribute being fetched (as seen in the current context,
+ * identified by our "econtext" argument). Otherwise crashes are
+ * likely.
*
- * Note that we can't check dropped columns, since their atttypid
- * has been zeroed.
+ * Note that we can't check dropped columns, since their atttypid has
+ * been zeroed.
*/
Assert(variable->vartype == tuple_type->attrs[attnum - 1]->atttypid);
}
else
{
/*
- * All other parameter types must be sought in ecxt_param_list_info.
+ * All other parameter types must be sought in
+ * ecxt_param_list_info.
*/
ParamListInfo paramInfo;
{
RegisterExprContextCallback(econtext,
ShutdownFuncExpr,
- PointerGetDatum(fcache));
+ PointerGetDatum(fcache));
fcache->shutdown_reg = true;
}
}
*
* We change the ExprState function pointer to use the simpler
* ExecMakeFunctionResultNoSets on subsequent calls. This amounts
- * to assuming that no argument can return a set if it didn't do so
- * the first time.
+ * to assuming that no argument can return a set if it didn't do
+ * so the first time.
*/
fcache->xprstate.evalfunc = (ExprStateEvalFunc) ExecMakeFunctionResultNoSets;
}
}
}
- /* fcinfo.isnull = false; */ /* handled by MemSet */
+ /* fcinfo.isnull = false; */ /* handled by MemSet */
result = FunctionCallInvoke(&fcinfo);
*isNull = fcinfo.isnull;
break;
/*
- * Can't do anything useful with NULL rowtype values. Currently
- * we raise an error, but another alternative is to just ignore
- * the result and "continue" to get another row.
+ * Can't do anything useful with NULL rowtype values.
+ * Currently we raise an error, but another alternative is to
+ * just ignore the result and "continue" to get another row.
*/
if (returnsTuple && fcinfo.isnull)
ereport(ERROR,
{
/*
* Use the type info embedded in the rowtype Datum to
- * look up the needed tupdesc. Make a copy for the query.
+ * look up the needed tupdesc. Make a copy for the
+ * query.
*/
- HeapTupleHeader td;
+ HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
tupdesc = lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
- HeapTupleHeaderGetTypMod(td));
+ HeapTupleHeaderGetTypMod(td));
tupdesc = CreateTupleDescCopy(tupdesc);
}
else
*/
if (returnsTuple)
{
- HeapTupleHeader td;
+ HeapTupleHeader td;
td = DatumGetHeapTupleHeader(result);
*isDone = ExprSingleResult;
/*
- * If there's a test expression, we have to evaluate it and save
- * the value where the CaseTestExpr placeholders can find it.
- * We must save and restore prior setting of econtext's caseValue fields,
- * in case this node is itself within a larger CASE.
+ * If there's a test expression, we have to evaluate it and save the
+ * value where the CaseTestExpr placeholders can find it. We must save
+ * and restore prior setting of econtext's caseValue fields, in case
+ * this node is itself within a larger CASE.
*/
save_datum = econtext->caseValue_datum;
save_isNull = econtext->caseValue_isNull;
{
econtext->caseValue_datum = ExecEvalExpr(caseExpr->arg,
econtext,
- &econtext->caseValue_isNull,
+ &econtext->caseValue_isNull,
NULL);
}
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("cannot merge incompatible arrays"),
errdetail("Array with element type %s cannot be "
- "included in ARRAY construct with element type %s.",
+ "included in ARRAY construct with element type %s.",
format_type_be(ARR_ELEMTYPE(array)),
format_type_be(element_type))));
if (ndims <= 0 || ndims > MAXDIM)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions (%d) exceeds " \
- "the maximum allowed (%d)", ndims, MAXDIM)));
+ errmsg("number of array dimensions (%d) exceeds " \
+ "the maximum allowed (%d)", ndims, MAXDIM)));
elem_dims = (int *) palloc(elem_ndims * sizeof(int));
memcpy(elem_dims, ARR_DIMS(array), elem_ndims * sizeof(int));
forboth(l1, fstate->newvals, l2, fstore->fieldnums)
{
- ExprState *newval = (ExprState *) lfirst(l1);
- AttrNumber fieldnum = lfirst_int(l2);
+ ExprState *newval = (ExprState *) lfirst(l1);
+ AttrNumber fieldnum = lfirst_int(l2);
bool eisnull;
Assert(fieldnum > 0 && fieldnum <= tupDesc->natts);
/*
- * Use the CaseTestExpr mechanism to pass down the old value of the
- * field being replaced; this is useful in case we have a nested field
- * update situation. It's safe to reuse the CASE mechanism because
- * there cannot be a CASE between here and where the value would be
- * needed.
+ * Use the CaseTestExpr mechanism to pass down the old value of
+ * the field being replaced; this is useful in case we have a
+ * nested field update situation. It's safe to reuse the CASE
+ * mechanism because there cannot be a CASE between here and where
+ * the value would be needed.
*/
econtext->caseValue_datum = values[fieldnum - 1];
econtext->caseValue_isNull = (nulls[fieldnum - 1] == 'n');
break;
case T_RowExpr:
{
- RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *rowexpr = (RowExpr *) node;
RowExprState *rstate = makeNode(RowExprState);
Form_pg_attribute *attrs;
List *outlist = NIL;
/*
* Guard against ALTER COLUMN TYPE on rowtype
* since the RowExpr was created. XXX should we
- * check typmod too? Not sure we can be sure it'll
- * be the same.
+ * check typmod too? Not sure we can be sure
+ * it'll be the same.
*/
if (exprType((Node *) e) != attrs[i]->atttypid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
errmsg("ROW() column has type %s instead of type %s",
- format_type_be(exprType((Node *) e)),
- format_type_be(attrs[i]->atttypid))));
+ format_type_be(exprType((Node *) e)),
+ format_type_be(attrs[i]->atttypid))));
}
else
{
TargetEntry *tle = (TargetEntry *) node;
GenericExprState *gstate = makeNode(GenericExprState);
- gstate->xprstate.evalfunc = NULL; /* not used */
+ gstate->xprstate.evalfunc = NULL; /* not used */
gstate->arg = ExecInitExpr(tle->expr, parent);
state = (ExprState *) gstate;
}
/*
* store the tuple in the projection slot and return the slot.
*/
- return ExecStoreTuple(newTuple, /* tuple to store */
- slot, /* slot to store in */
- InvalidBuffer, /* tuple has no buffer */
+ return ExecStoreTuple(newTuple, /* tuple to store */
+ slot, /* slot to store in */
+ InvalidBuffer, /* tuple has no buffer */
true);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.32 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execScan.c,v 1.33 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return false; /* tlist too long */
/*
- * If the plan context requires a particular hasoid setting, then
- * that has to match, too.
+ * If the plan context requires a particular hasoid setting, then that
+ * has to match, too.
*/
if (ExecContextForcesOids(ps, &hasoid) &&
hasoid != tupdesc->tdhasoid)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.81 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execTuples.c,v 1.82 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static TupleDesc ExecTypeFromTLInternal(List *targetList,
- bool hasoid, bool skipjunk);
+ bool hasoid, bool skipjunk);
/* ----------------------------------------------------------------
/*
* Now allocate our new table along with space for the pointers to the
- * tuples. Zero out the slots.
+ * tuples. Zero out the slots.
*/
newtable = (TupleTable) palloc(sizeof(TupleTableData));
static TupleDesc
ExecTypeFromTLInternal(List *targetList, bool hasoid, bool skipjunk)
{
- TupleDesc typeInfo;
- ListCell *l;
- int len;
- int cur_resno = 1;
+ TupleDesc typeInfo;
+ ListCell *l;
+ int len;
+ int cur_resno = 1;
if (skipjunk)
len = ExecCleanTargetListLength(targetList);
foreach(l, targetList)
{
- TargetEntry *tle = lfirst(l);
- Resdom *resdom = tle->resdom;
+ TargetEntry *tle = lfirst(l);
+ Resdom *resdom = tle->resdom;
if (skipjunk && resdom->resjunk)
continue;
TupleDesc
ExecTypeFromExprList(List *exprList)
{
- TupleDesc typeInfo;
- ListCell *l;
- int cur_resno = 1;
+ TupleDesc typeInfo;
+ ListCell *l;
+ int cur_resno = 1;
char fldname[NAMEDATALEN];
typeInfo = CreateTemplateTupleDesc(list_length(exprList), false);
foreach(l, exprList)
{
- Node *e = lfirst(l);
+ Node *e = lfirst(l);
sprintf(fldname, "f%d", cur_resno);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.113 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/execUtils.c,v 1.114 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
estate->es_direction = ForwardScanDirection;
estate->es_snapshot = SnapshotNow;
- estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
+ estate->es_crosscheck_snapshot = SnapshotAny; /* means no crosscheck */
estate->es_range_table = NIL;
estate->es_result_relations = NULL;
*/
while (estate->es_exprcontexts)
{
- /* XXX: seems there ought to be a faster way to implement this
+ /*
+ * XXX: seems there ought to be a faster way to implement this
* than repeated list_delete(), no?
*/
FreeExprContext((ExprContext *) linitial(estate->es_exprcontexts));
* ReScanExprContext
*
* Reset an expression context in preparation for a rescan of its
- * plan node. This requires calling any registered shutdown callbacks,
+ * plan node. This requires calling any registered shutdown callbacks,
* since any partially complete set-returning-functions must be canceled.
*
* Note we make no assumption about the caller's memory context.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.85 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/functions.c,v 1.86 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct
{
- Oid *argtypes; /* resolved types of arguments */
+ Oid *argtypes; /* resolved types of arguments */
Oid rettype; /* actual return type */
int typlen; /* length of the return type */
bool typbyval; /* true if return type is pass by value */
{
execution_state *firstes = NULL;
execution_state *preves = NULL;
- ListCell *qtl_item;
+ ListCell *qtl_item;
foreach(qtl_item, queryTree_list)
{
typeStruct = (Form_pg_type) GETSTRUCT(typeTuple);
/*
- * get the type length and by-value flag from the type tuple; also
- * do a preliminary check for returnsTuple (this may prove inaccurate,
+ * get the type length and by-value flag from the type tuple; also do
+ * a preliminary check for returnsTuple (this may prove inaccurate,
* see below).
*/
fcache->typlen = typeStruct->typlen;
rettype == RECORDOID);
/*
- * Parse and rewrite the queries. We need the argument type info to pass
- * to the parser.
+ * Parse and rewrite the queries. We need the argument type info to
+ * pass to the parser.
*/
nargs = procedureStruct->pronargs;
haspolyarg = false;
* If the function has any arguments declared as polymorphic types,
* then it wasn't type-checked at definition time; must do so now.
*
- * Also, force a type-check if the declared return type is a rowtype;
- * we need to find out whether we are actually returning the whole
- * tuple result, or just regurgitating a rowtype expression result.
- * In the latter case we clear returnsTuple because we need not act
- * different from the scalar result case.
+ * Also, force a type-check if the declared return type is a rowtype; we
+ * need to find out whether we are actually returning the whole tuple
+ * result, or just regurgitating a rowtype expression result. In the
+ * latter case we clear returnsTuple because we need not act different
+ * from the scalar result case.
*/
if (haspolyarg || fcache->returnsTuple)
fcache->returnsTuple = check_sql_fn_retval(rettype,
* XXX do we need to remove junk attrs from the result tuple?
* Probably OK to leave them, as long as they are at the end.
*/
- HeapTupleHeader dtup;
- Oid dtuptype;
- int32 dtuptypmod;
+ HeapTupleHeader dtup;
+ Oid dtuptype;
+ int32 dtuptypmod;
dtup = (HeapTupleHeader) palloc(tup->t_len);
memcpy((char *) dtup, (char *) tup->t_data, tup->t_len);
else
{
/*
- * Returning a scalar, which we have to extract from the
- * first column of the SELECT result, and then copy into current
+ * Returning a scalar, which we have to extract from the first
+ * column of the SELECT result, and then copy into current
* execution context if needed.
*/
value = heap_getattr(tup, 1, tupDesc, &(fcinfo->isnull));
fn_name = NameStr(functup->proname);
/*
- * If there is a syntax error position, convert to internal syntax error
+ * If there is a syntax error position, convert to internal syntax
+ * error
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeAgg.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * If we are reinitializing after a group boundary, we have to free
- * any prior transValue to avoid memory leakage. We must check not
- * only the isnull flag but whether the pointer is NULL; since
- * pergroupstate is initialized with palloc0, the initial condition
- * has isnull = 0 and null pointer.
+ * If we are reinitializing after a group boundary, we have to
+ * free any prior transValue to avoid memory leakage. We must
+ * check not only the isnull flag but whether the pointer is NULL;
+ * since pergroupstate is initialized with palloc0, the initial
+ * condition has isnull = 0 and null pointer.
*/
if (!peraggstate->transtypeByVal &&
!pergroupstate->transValueIsNull &&
/*
* If we have no first tuple (ie, the outerPlan didn't return
* anything), create a dummy all-nulls input tuple for use by
- * ExecQual/ExecProject. 99.44% of the time this is a waste of cycles,
- * because ordinarily the projected output tuple's targetlist
- * cannot contain any direct (non-aggregated) references to input
- * columns, so the dummy tuple will not be referenced. However
- * there are special cases where this isn't so --- in particular
- * an UPDATE involving an aggregate will have a targetlist
- * reference to ctid. We need to return a null for ctid in that
- * situation, not coredump.
+ * ExecQual/ExecProject. 99.44% of the time this is a waste of
+ * cycles, because ordinarily the projected output tuple's
+ * targetlist cannot contain any direct (non-aggregated)
+ * references to input columns, so the dummy tuple will not be
+ * referenced. However there are special cases where this isn't so
+ * --- in particular an UPDATE involving an aggregate will have a
+ * targetlist reference to ctid. We need to return a null for
+ * ctid in that situation, not coredump.
*
* The values returned for the aggregates will be the initial values
* of the transition functions.
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate results
- * and the representative input tuple. Note we do not support
- * aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate
+ * results and the representative input tuple. Note we do not
+ * support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
if (ExecQual(aggstate->ss.ps.qual, econtext, false))
{
/*
- * Form and return a projection tuple using the aggregate results
- * and the representative input tuple. Note we do not support
- * aggregates returning sets ...
+ * Form and return a projection tuple using the aggregate
+ * results and the representative input tuple. Note we do not
+ * support aggregates returning sets ...
*/
return ExecProject(projInfo, NULL);
}
}
else
{
- /* Reset the per-group state (in particular, mark transvalues null) */
+ /*
+ * Reset the per-group state (in particular, mark transvalues
+ * null)
+ */
MemSet(node->pergroup, 0,
sizeof(AggStatePerGroupData) * node->numaggs);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.63 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeHashjoin.c,v 1.64 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (nread != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: %m")));
+ errmsg("could not read from hash-join temporary file: %m")));
heapTuple = palloc(HEAPTUPLESIZE + htup.t_len);
memcpy((char *) heapTuple, (char *) &htup, sizeof(HeapTupleData));
heapTuple->t_datamcxt = CurrentMemoryContext;
if (nread != (size_t) htup.t_len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read from hash-join temporary file: %m")));
+ errmsg("could not read from hash-join temporary file: %m")));
return ExecStoreTuple(heapTuple, tupleSlot, InvalidBuffer, true);
}
if (BufFileSeek(hashtable->outerBatchFile[newbatch - 1], 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
innerFile = hashtable->innerBatchFile[newbatch - 1];
if (BufFileSeek(innerFile, 0, 0L, SEEK_SET))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not rewind hash-join temporary file: %m")));
+ errmsg("could not rewind hash-join temporary file: %m")));
/*
* Reload the hash table with the new inner batch
if (written != sizeof(HeapTupleData))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to hash-join temporary file: %m")));
+ errmsg("could not write to hash-join temporary file: %m")));
written = BufFileWrite(file, (void *) heapTuple->t_data, heapTuple->t_len);
if (written != (size_t) heapTuple->t_len)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write to hash-join temporary file: %m")));
+ errmsg("could not write to hash-join temporary file: %m")));
}
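The error checks above guard a simple length-prefixed record format: each saved tuple is written to the batch temp file as a fixed-size header followed by t_len bytes of body, and any short read or write is reported. Below is a minimal stdio sketch of the same framing, using an invented RecHeader struct and plain FILE I/O instead of the BufFile layer:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef struct RecHeader { size_t len; } RecHeader;

    static int
    save_record(FILE *f, const void *data, size_t len)
    {
        RecHeader   hdr = {len};

        if (fwrite(&hdr, sizeof(hdr), 1, f) != 1)
            return -1;              /* short write on the header */
        if (fwrite(data, 1, len, f) != len)
            return -1;              /* short write on the body */
        return 0;
    }

    static void *
    load_record(FILE *f, size_t *len)
    {
        RecHeader   hdr;
        void       *buf;

        if (fread(&hdr, sizeof(hdr), 1, f) != 1)
            return NULL;            /* EOF, or short read on the header */
        buf = malloc(hdr.len);
        if (buf == NULL || fread(buf, 1, hdr.len, f) != hdr.len)
        {
            free(buf);
            return NULL;            /* short read on the body */
        }
        *len = hdr.len;
        return buf;
    }

    int
    main(void)
    {
        FILE       *f = tmpfile();
        const char *msg = "hello";
        size_t      len = 0;
        char       *copy;

        if (f == NULL)
            return 1;
        save_record(f, msg, strlen(msg) + 1);
        rewind(f);
        copy = load_record(f, &len);
        printf("%s (%zu bytes)\n", copy ? copy : "error", len);
        free(copy);
        fclose(f);
        return 0;
    }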
void
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.96 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.97 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* In a multiple-index plan, we must take care to return any given tuple
* only once, even if it matches conditions of several index scans. Our
* preferred way to do this is to record already-returned tuples in a hash
- * table (using the TID as unique identifier). However, in a very large
+ * table (using the TID as unique identifier). However, in a very large
* scan this could conceivably run out of memory. We limit the hash table
* to no more than work_mem KB; if it grows past that, we fall back to the
* pre-7.4 technique: evaluate the prior-scan index quals again for each
scanrelid = ((IndexScan *) node->ss.ps.plan)->scan.scanrelid;
/*
- * Clear any reference to the previously returned tuple. The idea here
- * is to not have the tuple slot be the last holder of a pin on that
- * tuple's buffer; if it is, we'll need a separate visit to the bufmgr
- * to release the buffer. By clearing here, we get to have the release
- * done by ReleaseAndReadBuffer inside index_getnext.
+ * Clear any reference to the previously returned tuple. The idea
+ * here is to not have the tuple slot be the last holder of a pin on
+ * that tuple's buffer; if it is, we'll need a separate visit to the
+ * bufmgr to release the buffer. By clearing here, we get to have the
+ * release done by ReleaseAndReadBuffer inside index_getnext.
*/
ExecClearTuple(slot);
false); /* don't pfree */
/*
- * If any of the index operators involved in this scan are lossy,
- * recheck them by evaluating the original operator clauses.
+ * If any of the index operators involved in this scan are
+ * lossy, recheck them by evaluating the original operator
+ * clauses.
*/
if (lossyQual)
{
ResetExprContext(econtext);
if (!ExecQual(lossyQual, econtext, false))
{
- /* Fails lossy op, so drop it and loop back for another */
+ /*
+ * Fails lossy op, so drop it and loop back for
+ * another
+ */
ExecClearTuple(slot);
continue;
}
}
/*
- * If it's a multiple-index scan, make sure not to double-report
- * a tuple matched by more than one index. (See notes above.)
+ * If it's a multiple-index scan, make sure not to
+ * double-report a tuple matched by more than one index. (See
+ * notes above.)
*/
if (numIndices > 1)
{
if (node->iss_DupHash)
{
DupHashTabEntry *entry;
- bool found;
+ bool found;
entry = (DupHashTabEntry *)
hash_search(node->iss_DupHash,
HASH_ENTER,
&found);
if (entry == NULL ||
- node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
+ node->iss_DupHash->hctl->nentries > node->iss_MaxHash)
{
/* out of memory (either hard or soft limit) */
/* release hash table and fall thru to old code */
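The comment and hunks above describe remembering already-returned TIDs in a hash table and abandoning that table (falling back to re-evaluating the prior-scan quals) once it would exceed the work_mem budget. Below is a simplified standalone sketch of that policy using a small open-addressed set with a soft entry cap; DupSet and dupset_test_and_insert are hypothetical names, and the real executor uses dynahash with iss_MaxHash as the limit.

#include <stdbool.h>
#include <stddef.h>

typedef struct TidKey
{
	unsigned int blkno;			/* block-number part of the TID */
	unsigned short offno;		/* offset-number part of the TID */
	bool		used;			/* slot occupied? */
} TidKey;

typedef struct DupSet
{
	TidKey	   *slots;			/* nslots entries, zero-initialized */
	size_t		nslots;			/* power of two, strictly > maxentries */
	size_t		nentries;
	size_t		maxentries;		/* soft limit, analogous to iss_MaxHash */
} DupSet;

/*
 * Record a TID.  Returns true if it was already present (a duplicate to be
 * skipped), false if newly inserted.  If inserting would exceed the soft
 * limit, *overflowed is set and the caller should drop the set and switch
 * to rechecking the prior scans' quals instead.
 */
static bool
dupset_test_and_insert(DupSet *set, unsigned int blkno, unsigned short offno,
					   bool *overflowed)
{
	size_t		i = (blkno * 2654435761u + offno) & (set->nslots - 1);

	*overflowed = false;
	while (set->slots[i].used)
	{
		if (set->slots[i].blkno == blkno && set->slots[i].offno == offno)
			return true;		/* seen before: caller skips this tuple */
		i = (i + 1) & (set->nslots - 1);
	}
	if (set->nentries >= set->maxentries)
	{
		*overflowed = true;		/* out of budget: fall back to recheck mode */
		return false;
	}
	set->slots[i].blkno = blkno;
	set->slots[i].offno = offno;
	set->slots[i].used = true;
	set->nentries++;
	return false;
}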
* initialize child expressions
*
* Note: we don't initialize all of the indxqual expression, only the
- * sub-parts corresponding to runtime keys (see below). The indxqualorig
- * expression is always initialized even though it will only be used in
- * some uncommon cases --- would be nice to improve that. (Problem is
- * that any SubPlans present in the expression must be found now...)
+ * sub-parts corresponding to runtime keys (see below). The
+ * indxqualorig expression is always initialized even though it will
+ * only be used in some uncommon cases --- would be nice to improve
+ * that. (Problem is that any SubPlans present in the expression must
+ * be found now...)
*/
indexstate->ss.ps.targetlist = (List *)
ExecInitExpr((Expr *) node->scan.plan.targetlist,
lossyflag_cell = list_head(lossyflags);
for (j = 0; j < n_keys; j++)
{
- OpExpr *clause; /* one clause of index qual */
- Expr *leftop; /* expr on lhs of operator */
- Expr *rightop; /* expr on rhs ... */
+ OpExpr *clause; /* one clause of index qual */
+ Expr *leftop; /* expr on lhs of operator */
+ Expr *rightop; /* expr on rhs ... */
int flags = 0;
AttrNumber varattno; /* att number used in scan */
StrategyNumber strategy; /* op's strategy number */
- Oid subtype; /* op's strategy subtype */
- int lossy; /* op's recheck flag */
+ Oid subtype; /* op's strategy subtype */
+ int lossy; /* op's recheck flag */
RegProcedure opfuncid; /* operator proc id used in scan */
Datum scanvalue; /* value used in scan (if const) */
/*
* Here we figure out the contents of the index qual. The
* usual case is (var op const) which means we form a scan key
- * for the attribute listed in the var node and use the value of
- * the const as comparison data.
+ * for the attribute listed in the var node and use the value
+ * of the const as comparison data.
*
* If we don't have a const node, it means our scan key is a
- * function of information obtained during the execution of the
- * plan, in which case we need to recalculate the index scan key
- * at run time. Hence, we set have_runtime_keys to true and place
- * the appropriate subexpression in run_keys. The corresponding
- * scan key values are recomputed at run time.
+ * function of information obtained during the execution of
+ * the plan, in which case we need to recalculate the index
+ * scan key at run time. Hence, we set have_runtime_keys to
+ * true and place the appropriate subexpression in run_keys.
+ * The corresponding scan key values are recomputed at run
+ * time.
*/
run_keys[j] = NULL;
scanvalue); /* constant */
/*
- * If this operator is lossy, add its indxqualorig
- * expression to the list of quals to recheck. The
- * list_nth() calls here could be avoided by chasing the
- * lists in parallel to all the other lists, but since
- * lossy operators are very uncommon, it's probably a
- * waste of time to do so.
+ * If this operator is lossy, add its indxqualorig expression
+ * to the list of quals to recheck. The list_nth() calls here
+ * could be avoided by chasing the lists in parallel to all
+ * the other lists, but since lossy operators are very
+ * uncommon, it's probably a waste of time to do so.
*/
if (lossy)
{
- List *qualOrig = indexstate->indxqualorig;
+ List *qualOrig = indexstate->indxqualorig;
+
lossyQuals[i] = lappend(lossyQuals[i],
- list_nth((List *) list_nth(qualOrig, i), j));
+ list_nth((List *) list_nth(qualOrig, i), j));
}
}
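The rewrapped comments above describe the lossy-operator protocol: the index may return rows that do not actually satisfy the clause, so every candidate is rechecked against the original qual before being returned. A minimal standalone sketch of that filter-then-recheck shape, with an arbitrary first-character prefilter standing in for the lossy operator:

#include <stdbool.h>
#include <string.h>

/*
 * Lossy prefilter: cheap, but may accept rows that do not really qualify
 * (case-insensitive first-character match, purely as a stand-in).
 */
static bool
lossy_match(const char *value, const char *pattern)
{
	return (value[0] | 0x20) == (pattern[0] | 0x20);
}

/* Exact predicate, the analogue of evaluating the original clause. */
static bool
exact_match(const char *value, const char *pattern)
{
	return strcmp(value, pattern) == 0;
}

/* Scan candidates; recheck every lossy hit before counting it as a result. */
static int
scan_with_recheck(const char *rows[], int nrows, const char *pattern)
{
	int			nresults = 0;
	int			i;

	for (i = 0; i < nrows; i++)
	{
		if (!lossy_match(rows[i], pattern))
			continue;			/* prefilter rejected: definitely no match */
		if (!exact_match(rows[i], pattern))
			continue;			/* lossy false positive: drop and loop back */
		nresults++;
	}
	return nresults;
}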
node->iss_DupHash = hash_create("DupHashTable",
nbuckets,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
if (node->iss_DupHash == NULL)
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.67 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeMergejoin.c,v 1.68 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
MJFormSkipQuals(List *qualList, List **ltQuals, List **gtQuals,
PlanState *parent)
{
- List *ltexprs,
- *gtexprs;
- ListCell *ltcdr,
- *gtcdr;
+ List *ltexprs,
+ *gtexprs;
+ ListCell *ltcdr,
+ *gtcdr;
/*
* Make modifiable copies of the qualList.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.49 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSeqscan.c,v 1.50 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
slot = node->ss_ScanTupleSlot;
/*
- * Clear any reference to the previously returned tuple. The idea here
- * is to not have the tuple slot be the last holder of a pin on that
- * tuple's buffer; if it is, we'll need a separate visit to the bufmgr
- * to release the buffer. By clearing here, we get to have the release
- * done by ReleaseAndReadBuffer inside heap_getnext.
+ * Clear any reference to the previously returned tuple. The idea
+ * here is to not have the tuple slot be the last holder of a pin on
+ * that tuple's buffer; if it is, we'll need a separate visit to the
+ * bufmgr to release the buffer. By clearing here, we get to have the
+ * release done by ReleaseAndReadBuffer inside heap_getnext.
*/
ExecClearTuple(slot);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.64 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeSubplan.c,v 1.65 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
SubLinkType subLinkType = subplan->subLinkType;
MemoryContext oldcontext;
TupleTableSlot *slot;
- ListCell *l;
+ ListCell *l;
bool found = false;
ArrayBuildState *astate = NULL;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.43 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/nodeUnique.c,v 1.44 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* he next calls us.
*
* tgl 3/2004: the above concern is no longer valid; junkfilters used to
- * modify their input's return slot but don't anymore, and I don't think
- * anyplace else does either. Not worth changing this code though.
+ * modify their input's return slot but don't anymore, and I don't
+ * think anyplace else does either. Not worth changing this code
+ * though.
*/
if (node->priorTuple != NULL)
heap_freetuple(node->priorTuple);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.124 2004/08/29 04:12:31 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/executor/spi.c,v 1.125 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static _SPI_connection *_SPI_stack = NULL;
static _SPI_connection *_SPI_current = NULL;
-static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
+static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_connected = -1;
static int _SPI_curid = -1;
static int _SPI_execute(const char *src, int tcount, _SPI_plan *plan);
-static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
- bool useCurrentSnapshot, int tcount);
+static int _SPI_pquery(QueryDesc *queryDesc, bool runit,
+ bool useCurrentSnapshot, int tcount);
static int _SPI_execute_plan(_SPI_plan *plan,
- Datum *Values, const char *Nulls,
- bool useCurrentSnapshot, int tcount);
+ Datum *Values, const char *Nulls,
+ bool useCurrentSnapshot, int tcount);
static void _SPI_error_callback(void *arg);
int
SPI_connect(void)
{
- int newdepth;
+ int newdepth;
/*
* When procedure called by Executor _SPI_curid expected to be equal
/*
* Create memory contexts for this procedure
*
- * XXX it would be better to use PortalContext as the parent context,
- * but we may not be inside a portal (consider deferred-trigger
- * execution). Perhaps CurTransactionContext would do? For now it
+ * XXX it would be better to use PortalContext as the parent context, but
+ * we may not be inside a portal (consider deferred-trigger
+ * execution). Perhaps CurTransactionContext would do? For now it
* doesn't matter because we clean up explicitly in AtEOSubXact_SPI().
*/
_SPI_current->procCxt = AllocSetContextCreate(TopTransactionContext,
void
AtEOSubXact_SPI(bool isCommit, TransactionId childXid)
{
- bool found = false;
+ bool found = false;
while (_SPI_connected >= 0)
{
found = true;
/*
- * Pop the stack entry and reset global variables. Unlike
+ * Pop the stack entry and reset global variables. Unlike
* SPI_finish(), we don't risk switching to memory contexts that
- * might be already gone, or deleting memory contexts that have been
- * or will be thrown away anyway.
+ * might be already gone, or deleting memory contexts that have
+ * been or will be thrown away anyway.
*/
_SPI_connected--;
_SPI_curid = _SPI_connected;
SPI_returntuple(HeapTuple tuple, TupleDesc tupdesc)
{
MemoryContext oldcxt = NULL;
- HeapTupleHeader dtup;
+ HeapTupleHeader dtup;
if (tuple == NULL || tupdesc == NULL)
{
Oid
SPI_getargtypeid(void *plan, int argIndex)
{
- if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan*)plan)->nargs)
+ if (plan == NULL || argIndex < 0 || argIndex >= ((_SPI_plan *) plan)->nargs)
{
SPI_result = SPI_ERROR_ARGUMENT;
return InvalidOid;
* if the command can be used with SPI_cursor_open
*
* Parameters
- * plan A plan previously prepared using SPI_prepare
+ * plan A plan previously prepared using SPI_prepare
*/
bool
SPI_is_cursor_plan(void *plan)
{
- _SPI_plan *spiplan = (_SPI_plan *) plan;
- List *qtlist;
+ _SPI_plan *spiplan = (_SPI_plan *) plan;
+ List *qtlist;
if (spiplan == NULL)
{
qtlist = spiplan->qtlist;
if (list_length(spiplan->ptlist) == 1 && list_length(qtlist) == 1)
{
- Query *queryTree = (Query *) linitial((List *) linitial(qtlist));
+ Query *queryTree = (Query *) linitial((List *) linitial(qtlist));
if (queryTree->commandType == CMD_SELECT && queryTree->into == NULL)
return true;
/*
* SPI_result_code_string --- convert any SPI return code to a string
*
- * This is often useful in error messages. Most callers will probably
+ * This is often useful in error messages. Most callers will probably
* only pass negative (error-case) codes, but for generality we recognize
* the success codes too.
*/
int syntaxerrposition;
/*
- * If there is a syntax error position, convert to internal syntax error;
- * otherwise treat the query as an item of context stack
+ * If there is a syntax error position, convert to internal syntax
+ * error; otherwise treat the query as an item of context stack
*/
syntaxerrposition = geterrposition();
if (syntaxerrposition > 0)
parentcxt = _SPI_current->procCxt;
else if (location == _SPI_CPLAN_TOPCXT)
parentcxt = TopMemoryContext;
- else /* (this case not currently used) */
+ else
+/* (this case not currently used) */
parentcxt = CurrentMemoryContext;
/*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.39 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/lib/stringinfo.c,v 1.40 2004/08/29 05:06:42 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Guard against ridiculous "needed" values, which can occur if we're
- * fed bogus data. Without this, we can get an overflow or infinite
+ * fed bogus data. Without this, we can get an overflow or infinite
* loop in the following.
*/
if (needed < 0 ||
newlen = 2 * newlen;
/*
- * Clamp to MaxAllocSize in case we went past it. Note we are assuming
- * here that MaxAllocSize <= INT_MAX/2, else the above loop could
- * overflow. We will still have newlen >= needed.
+ * Clamp to MaxAllocSize in case we went past it. Note we are
+ * assuming here that MaxAllocSize <= INT_MAX/2, else the above loop
+ * could overflow. We will still have newlen >= needed.
*/
if (newlen > (int) MaxAllocSize)
newlen = (int) MaxAllocSize;
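The enlargement code above doubles the buffer length until it covers the request and then clamps it to the allocation ceiling, relying on the ceiling being no larger than INT_MAX/2 so the doubling cannot overflow. A standalone sketch of that growth policy; MAX_ALLOC and grow_buffer_len are stand-ins for MaxAllocSize and the patch's function.

#define MAX_ALLOC 0x3fffffff	/* stand-in ceiling; must be <= INT_MAX/2 */

/*
 * Compute the new buffer length for a request of "needed" bytes, starting
 * from the current length "curlen".  Because MAX_ALLOC <= INT_MAX/2 and the
 * loop stops as soon as newlen >= needed, the doubling cannot overflow, and
 * the clamped result is still >= needed.
 */
static int
grow_buffer_len(int curlen, int needed)
{
	int			newlen;

	if (needed < 0 || needed > MAX_ALLOC)
		return -1;				/* reject bogus requests up front */

	newlen = (curlen > 0) ? 2 * curlen : 64;
	while (newlen < needed)
		newlen = 2 * newlen;

	if (newlen > MAX_ALLOC)
		newlen = MAX_ALLOC;

	return newlen;
}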
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.117 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/auth.c,v 1.118 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errstr = gettext_noop("PAM authentication failed for user \"%s\"");
break;
#endif /* USE_PAM */
- default :
+ default:
errstr = gettext_noop("Unknown auth method: authentication failed for user \"%s\"");
break;
}
break;
case uaIdent:
+
/*
* If we are doing ident on unix-domain sockets, use SCM_CREDS
* only if it is defined and SO_PEERCRED isn't.
if (port->raddr.addr.ss_family == AF_UNIX)
{
#if defined(HAVE_STRUCT_FCRED) || defined(HAVE_STRUCT_SOCKCRED)
+
/*
* Receive credentials on next message receipt, BSD/OS,
* NetBSD. We need to set this before the client sends the
if (setsockopt(port->sock, 0, LOCAL_CREDS, &on, sizeof(on)) < 0)
ereport(FATAL,
(errcode_for_socket_access(),
- errmsg("could not enable credential reception: %m")));
+ errmsg("could not enable credential reception: %m")));
#endif
sendAuthRequest(port, AUTH_REQ_SCM_CREDS);
if (mtype != EOF)
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
- errmsg("expected password response, got message type %d",
- mtype)));
+ errmsg("expected password response, got message type %d",
+ mtype)));
return NULL; /* EOF or bad message type */
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.73 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-fsstubs.c,v 1.74 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* This should be moved to a more appropriate place. It is here
currentContext = MemoryContextSwitchTo(fscxt);
/*
- * Close LO fds and clear cookies array so that LO fds are no longer good.
- * On abort we skip the close step.
+ * Close LO fds and clear cookies array so that LO fds are no longer
+ * good. On abort we skip the close step.
*/
for (i = 0; i < cookies_size; i++)
{
/*
* AtEOSubXact_LargeObject
- * Take care of large objects at subtransaction commit/abort
+ * Take care of large objects at subtransaction commit/abort
*
* Reassign LOs created/opened during a committing subtransaction
* to the parent transaction. On abort, just close them.
AtEOSubXact_LargeObject(bool isCommit, TransactionId myXid,
TransactionId parentXid)
{
- int i;
+ int i;
if (fscxt == NULL) /* no LO operations in this xact */
return;
else
{
/*
- * Make sure we do not call inv_close twice if it errors out
- * for some reason. Better a leak than a crash.
+ * Make sure we do not call inv_close twice if it errors
+ * out for some reason. Better a leak than a crash.
*/
deleteLOfd(i);
inv_close(lo);
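The hunks above capture the large-object bookkeeping rules: at subtransaction end, descriptors opened by a committing subtransaction are handed to the parent, aborted ones are closed, and the slot is released before the close so a failure inside the close cannot trigger a second close. A simplified standalone sketch of that pattern; LoSlot, Xid and close_descriptor are illustrative stand-ins for the patch's LO machinery.

#include <stdbool.h>
#include <stddef.h>

typedef unsigned int Xid;		/* hypothetical transaction id */

typedef struct LoSlot
{
	bool		in_use;
	Xid			creator_xid;	/* subtransaction that opened it */
	int			fd;				/* underlying descriptor */
} LoSlot;

/* Stand-in for inv_close(); may raise an error in the real code. */
static void
close_descriptor(int fd)
{
	(void) fd;
}

/*
 * At subtransaction end: on commit, reassign descriptors opened by the
 * ending subtransaction to its parent; on abort, close them.  Each slot is
 * released *before* the close, so an error inside the close cannot lead to
 * closing the same descriptor twice ("better a leak than a crash").
 */
static void
at_subxact_end(LoSlot *slots, size_t nslots,
			   bool is_commit, Xid my_xid, Xid parent_xid)
{
	size_t		i;

	for (i = 0; i < nslots; i++)
	{
		if (!slots[i].in_use || slots[i].creator_xid != my_xid)
			continue;
		if (is_commit)
			slots[i].creator_xid = parent_xid;	/* now owned by the parent */
		else
		{
			int			fd = slots[i].fd;

			slots[i].in_use = false;	/* clear first to avoid double close */
			close_descriptor(fd);
		}
	}
}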
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.47 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/be-secure.c,v 1.48 2004/08/29 05:06:43 momjian Exp $
*
* Since the server static private key ($DataDir/server.key)
* will normally be stored unencrypted so that the database
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("unrecognized SSL error code %d",
- SSL_get_error(port->ssl, n))));
+ SSL_get_error(port->ssl, n))));
n = -1;
break;
}
ereport(COMMERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("unrecognized SSL error code %d",
- SSL_get_error(port->ssl, n))));
+ SSL_get_error(port->ssl, n))));
n = -1;
break;
}
if (r == NULL || 8 * DH_size(r) < keylength)
{
ereport(DEBUG2,
- (errmsg_internal("DH: generating parameters (%d bits)....",
- keylength)));
+ (errmsg_internal("DH: generating parameters (%d bits)....",
+ keylength)));
r = DH_generate_parameters(keylength, DH_GENERATOR_2, NULL, NULL);
}
errmsg("could not access private key file \"%s\": %m",
fnbuf)));
- /*
+ /*
* Require no public access to key file.
*
* XXX temporarily suppress check when on Windows, because there may
- * not be proper support for Unix-y file permissions. Need to think
- * of a reasonable check to apply on Windows. (See also the data
- * directory permission check in postmaster.c)
+ * not be proper support for Unix-y file permissions. Need to
+ * think of a reasonable check to apply on Windows. (See also the
+ * data directory permission check in postmaster.c)
*/
#if !defined(__CYGWIN__) && !defined(WIN32)
if (!S_ISREG(buf.st_mode) || (buf.st_mode & (S_IRWXG | S_IRWXO)) ||
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.128 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/hba.c,v 1.129 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Max size of username ident server can return */
#define IDENT_USERNAME_MAX 512
-/* Standard TCP port number for Ident service. Assigned by IANA */
+/* Standard TCP port number for Ident service. Assigned by IANA */
#define IDENT_PORT 113
-/* Name of the config file */
+/* Name of the config file */
#define CONF_FILE "pg_hba.conf"
/* Name of the usermap file */
*/
/* pre-parsed content of CONF_FILE and corresponding line #s */
-static List *hba_lines = NIL;
-static List *hba_line_nums = NIL;
+static List *hba_lines = NIL;
+static List *hba_line_nums = NIL;
+
/* pre-parsed content of USERMAP_FILE and corresponding line #s */
-static List *ident_lines = NIL;
-static List *ident_line_nums = NIL;
+static List *ident_lines = NIL;
+static List *ident_line_nums = NIL;
+
/* pre-parsed content of group file and corresponding line #s */
-static List *group_lines = NIL;
-static List *group_line_nums = NIL;
+static List *group_lines = NIL;
+static List *group_line_nums = NIL;
+
/* pre-parsed content of user passwd file and corresponding line #s */
-static List *user_lines = NIL;
-static List *user_line_nums = NIL;
+static List *user_lines = NIL;
+static List *user_line_nums = NIL;
/* sorted entries so we can do binary search lookups */
static List **user_sorted = NULL; /* sorted user list, for bsearch() */
char *end_buf = buf + (bufsz - 2);
bool in_quote = false;
bool was_quote = false;
- bool saw_quote = false;
+ bool saw_quote = false;
Assert(end_buf > start_buf);
}
/*
- * Build a token in buf of next characters up to EOF, EOL,
- * unquoted comma, or unquoted whitespace.
+ * Build a token in buf of next characters up to EOF, EOL, unquoted
+ * comma, or unquoted whitespace.
*/
while (c != EOF && c != '\n' &&
(!pg_isblank(c) || in_quote == true))
*buf = '\0';
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("authentication file token too long, skipping: \"%s\"",
- start_buf)));
+ errmsg("authentication file token too long, skipping: \"%s\"",
+ start_buf)));
/* Discard remainder of line */
while ((c = getc(fp)) != EOF && c != '\n')
;
*buf = '\0';
- if (!saw_quote &&
- (strcmp(start_buf, "all") == 0 ||
- strcmp(start_buf, "sameuser") == 0 ||
- strcmp(start_buf, "samegroup") == 0))
+ if (!saw_quote &&
+ (strcmp(start_buf, "all") == 0 ||
+ strcmp(start_buf, "sameuser") == 0 ||
+ strcmp(start_buf, "samegroup") == 0))
{
/* append newline to a magical keyword */
*buf++ = '\n';
if (*lines)
{
/*
- * "lines" is a list of lists; each of those sublists consists
- * of palloc'ed tokens, so we want to free each pointed-to
- * token in a sublist, followed by the sublist itself, and
- * finally the whole list.
+ * "lines" is a list of lists; each of those sublists consists of
+ * palloc'ed tokens, so we want to free each pointed-to token in a
+ * sublist, followed by the sublist itself, and finally the whole
+ * list.
*/
ListCell *line;
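The comment above spells out the ownership structure of the parsed pg_hba.conf lines: an outer list whose elements are sublists of individually allocated tokens, freed innermost first. A standalone sketch of that teardown order on a minimal singly linked list; Cell and free_token_lines are illustrative names, not the List API used in the patch.

#include <stdlib.h>

typedef struct Cell
{
	void	   *data;
	struct Cell *next;
} Cell;

/* Free each token string, then each per-line sublist, then the outer list. */
static void
free_token_lines(Cell *lines)
{
	Cell	   *line = lines;

	while (line != NULL)
	{
		Cell	   *next_line = line->next;
		Cell	   *tok = (Cell *) line->data;	/* sublist of tokens */

		while (tok != NULL)
		{
			Cell	   *next_tok = tok->next;

			free(tok->data);	/* the palloc'd token in the real code */
			free(tok);			/* the sublist cell itself */
			tok = next_tok;
		}
		free(line);				/* finally the outer list cell */
		line = next_line;
	}
}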
/* Create comma-separate string from List */
foreach(line, inc_lines)
{
- List *token_list = (List *) lfirst(line);
- ListCell *token;
+ List *token_list = (List *) lfirst(line);
+ ListCell *token;
foreach(token, token_list)
{
/*
* Lookup a user name in the pg_shadow file
*/
-List **
+List **
get_user_line(const char *user)
{
/* On some versions of Solaris, bsearch of zero items dumps core */
if ((line = get_group_line(group)) != NULL)
{
- ListCell *line_item;
+ ListCell *line_item;
/* skip over the group name */
for_each_cell(line_item, lnext(list_head(*line)))
if (addr.ss_family != port->raddr.addr.ss_family)
{
/*
- * Wrong address family. We allow only one case: if the
- * file has IPv4 and the port is IPv6, promote the file
- * address to IPv6 and try to match that way.
+ * Wrong address family. We allow only one case: if the file
+ * has IPv4 and the port is IPv6, promote the file address to
+ * IPv6 and try to match that way.
*/
#ifdef HAVE_IPV6
if (addr.ss_family == AF_INET &&
promote_v4_to_v6_mask(&mask);
}
else
-#endif /* HAVE_IPV6 */
+#endif /* HAVE_IPV6 */
{
/* Line doesn't match client port, so ignore it. */
return;
else
ereport(LOG,
(errcode(ERRCODE_CONFIG_FILE_ERROR),
- errmsg("missing field in pg_hba.conf file at end of line %d",
- line_num)));
+ errmsg("missing field in pg_hba.conf file at end of line %d",
+ line_num)));
/* Come here if suitable message already logged */
hba_other_error:
conf_file = pstrdup(guc_hbafile);
else
{
- char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
+ char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
+
/* put together the full pathname to the config file */
conf_file = palloc(strlen(confloc) + strlen(CONF_FILE) + 2);
sprintf(conf_file, "%s/%s", confloc, CONF_FILE);
}
else
{
- ListCell *line_cell, *num_cell;
+ ListCell *line_cell,
+ *num_cell;
forboth(line_cell, ident_lines, num_cell, ident_line_nums)
{
FILE *file; /* The map file we have to read */
char *map_file; /* The name of the map file we have to
* read */
+
if (ident_lines || ident_line_nums)
free_lines(&ident_lines, &ident_line_nums);
else
{
/* put together the full pathname to the map file */
- char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
+ char *confloc = (user_pgconfig_is_dir) ? user_pgconfig : DataDir;
+
map_file = (char *) palloc(strlen(confloc) + strlen(USERMAP_FILE) + 2);
sprintf(map_file, "%s/%s", confloc, USERMAP_FILE);
}
-
+
file = AllocateFile(map_file, "r");
if (file == NULL)
{
interpret_ident_response(const char *ident_response,
char *ident_user)
{
- const char *cursor = ident_response; /* Cursor into
+ const char *cursor = ident_response; /* Cursor into
* *ident_response */
/*
hints.ai_addr = NULL;
hints.ai_next = NULL;
rc = getaddrinfo_all(remote_addr_s, ident_port, &hints, &ident_serv);
- if (rc || !ident_serv) {
+ if (rc || !ident_serv)
+ {
if (ident_serv)
freeaddrinfo_all(hints.ai_family, ident_serv);
return false; /* we don't expect this to happen */
hints.ai_addr = NULL;
hints.ai_next = NULL;
rc = getaddrinfo_all(local_addr_s, NULL, &hints, &la);
- if (rc || !la) {
+ if (rc || !la)
+ {
if (la)
freeaddrinfo_all(hints.ai_family, la);
return false; /* we don't expect this to happen */
ident_return = interpret_ident_response(ident_response, ident_user);
if (!ident_return)
ereport(LOG,
- (errmsg("invalidly formatted response from Ident server: \"%s\"",
- ident_response)));
+ (errmsg("invalidly formatted response from Ident server: \"%s\"",
+ ident_response)));
ident_inet_done:
if (sock_fd >= 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.27 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/ip.c,v 1.28 2004/08/29 05:06:43 momjian Exp $
*
* This file and the IPV6 implementation were initially provided by
* Nigel Kukard , Linux Based Systems Design
#endif
#include
-
-#endif /* !defined(_MSC_VER) && !defined(__BORLANDC__) */
+#endif /* !defined(_MSC_VER) &&
+ * !defined(__BORLANDC__) */
#include "libpq/ip.h"
*/
int
getaddrinfo_all(const char *hostname, const char *servname,
- const struct addrinfo *hintp, struct addrinfo **result)
+ const struct addrinfo * hintp, struct addrinfo ** result)
{
/* not all versions of getaddrinfo() zero *result on failure */
*result = NULL;
return 0;
}
-
#endif /* HAVE_UNIX_SOCKETS */
return 1;
}
-
#endif
/*
memcpy(addr, &addr6, sizeof(addr6));
}
-#endif /* HAVE_IPV6 */
+#endif /* HAVE_IPV6 */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.170 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqcomm.c,v 1.171 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
ereport(LOG,
(errcode_for_socket_access(),
- /* translator: %s is IPv4, IPv6, or Unix */
+ /* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not create %s socket: %m",
familyDesc)));
continue;
{
ereport(LOG,
(errcode_for_socket_access(),
- /* translator: %s is IPv4, IPv6, or Unix */
+ /* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not bind %s socket: %m",
familyDesc),
(IS_AF_UNIX(addr->ai_family)) ?
{
ereport(LOG,
(errcode_for_socket_access(),
- /* translator: %s is IPv4, IPv6, or Unix */
+ /* translator: %s is IPv4, IPv6, or Unix */
errmsg("could not listen on %s socket: %m",
familyDesc)));
closesocket(fd);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.36 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/libpq/pqsignal.c,v 1.37 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
sigset_t UnBlockSig,
BlockSig,
AuthBlockSig;
+
#else
int UnBlockSig,
BlockSig,
#endif /* !HAVE_POSIX_SIGNALS */
}
-#endif /* WIN32 */
+#endif /* WIN32 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.88 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/main/main.c,v 1.89 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (err != 0)
{
write_stderr("%s: WSAStartup failed: %d\n",
- argv[0], err);
+ argv[0], err);
exit(1);
}
write_stderr("\"root\" execution of the PostgreSQL server is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !__BEOS__ */
argv[0]);
exit(1);
}
-#else /* WIN32 */
+#else /* WIN32 */
if (pgwin32_is_admin())
{
write_stderr("execution of PostgreSQL by a user with administrative permissions is not permitted.\n"
"The server must be started under an unprivileged user ID to prevent\n"
"possible system security compromise. See the documentation for\n"
- "more information on how to properly start the server.\n");
+ "more information on how to properly start the server.\n");
exit(1);
}
#endif /* !WIN32 */
#endif
/*
- * If the first argument is "-boot", then invoke bootstrap mode.
- * (This path is taken only for a standalone bootstrap process.)
+ * If the first argument is "-boot", then invoke bootstrap mode. (This
+ * path is taken only for a standalone bootstrap process.)
*/
if (argc > 1 && strcmp(argv[1], "-boot") == 0)
exit(BootstrapMain(argc, argv));
if (!GetUserName(pw_name_persist, &namesize))
{
write_stderr("%s: could not determine user name (GetUserName failed)\n",
- argv[0]);
+ argv[0]);
exit(1);
}
}
-#endif /* WIN32 */
+#endif /* WIN32 */
exit(PostgresMain(argc, argv, pw_name_persist));
}
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.291 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/copyfuncs.c,v 1.292 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static RowExpr *
_copyRowExpr(RowExpr *from)
{
- RowExpr *newnode = makeNode(RowExpr);
+ RowExpr *newnode = makeNode(RowExpr);
COPY_NODE_FIELD(args);
COPY_SCALAR_FIELD(row_typeid);
static SortBy *
_copySortBy(SortBy *from)
{
- SortBy *newnode = makeNode(SortBy);
+ SortBy *newnode = makeNode(SortBy);
COPY_SCALAR_FIELD(sortby_kind);
COPY_NODE_FIELD(useOp);
static List *
_copyList(List *from)
{
- List *new;
- ListCell *curr_old;
- ListCell *prev_new;
+ List *new;
+ ListCell *curr_old;
+ ListCell *prev_new;
Assert(list_length(from) >= 1);
case T_List:
retval = _copyList(from);
break;
+
/*
- * Lists of integers and OIDs don't need to be
- * deep-copied, so we perform a shallow copy via
- * list_copy()
+ * Lists of integers and OIDs don't need to be deep-copied, so
+ * we perform a shallow copy via list_copy()
*/
case T_IntList:
case T_OidList:
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.230 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/equalfuncs.c,v 1.231 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool
_equalList(List *a, List *b)
{
- ListCell *item_a;
- ListCell *item_b;
+ ListCell *item_a;
+ ListCell *item_b;
/*
- * Try to reject by simple scalar checks before grovelling through
- * all the list elements...
+ * Try to reject by simple scalar checks before grovelling through all
+ * the list elements...
*/
COMPARE_SCALAR_FIELD(type);
COMPARE_SCALAR_FIELD(length);
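The _equalList hunks above reject mismatches with the cheap scalar fields (type, length) before walking the elements. The same shape on a minimal integer list, as a standalone sketch with stand-in types:

#include <stdbool.h>
#include <stddef.h>

typedef struct IntCell
{
	int			value;
	struct IntCell *next;
} IntCell;

typedef struct IntList
{
	int			length;			/* maintained on insert, so the check is O(1) */
	IntCell    *head;
} IntList;

/* Cheap scalar check first, then element-by-element comparison. */
static bool
int_list_equal(const IntList *a, const IntList *b)
{
	const IntCell *ca;
	const IntCell *cb;

	if (a->length != b->length)
		return false;			/* reject without touching any cells */

	for (ca = a->head, cb = b->head; ca && cb; ca = ca->next, cb = cb->next)
	{
		if (ca->value != cb->value)
			return false;
	}
	return ca == NULL && cb == NULL;	/* both exhausted together */
}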
/*
- * We place the switch outside the loop for the sake of
- * efficiency; this may not be worth doing...
+ * We place the switch outside the loop for the sake of efficiency;
+ * this may not be worth doing...
*/
switch (a->type)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.61 2004/08/29 04:12:32 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/list.c,v 1.62 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Assert(list->head->next == list->tail);
Assert(list->tail->next == NULL);
}
+
#else
#define check_list_invariants(l)
-#endif /* USE_ASSERT_CHECKING */
+#endif /* USE_ASSERT_CHECKING */
/*
* Return a freshly allocated List. Since empty non-NIL lists are
static List *
new_list(NodeTag type)
{
- List *new_list;
- ListCell *new_head;
+ List *new_list;
+ ListCell *new_head;
new_head = (ListCell *) palloc(sizeof(*new_head));
new_head->next = NULL;
static void
new_head_cell(List *list)
{
- ListCell *new_head;
+ ListCell *new_head;
new_head = (ListCell *) palloc(sizeof(*new_head));
new_head->next = list->head;
static void
new_tail_cell(List *list)
{
- ListCell *new_tail;
+ ListCell *new_tail;
new_tail = (ListCell *) palloc(sizeof(*new_tail));
new_tail->next = NULL;
/*
* Append an integer to the specified list. See lappend()
*/
-List *
+List *
lappend_int(List *list, int datum)
{
Assert(IsIntegerList(list));
/*
* Append an OID to the specified list. See lappend()
*/
-List *
+List *
lappend_oid(List *list, Oid datum)
{
Assert(IsOidList(list));
static ListCell *
add_new_cell(List *list, ListCell *prev_cell)
{
- ListCell *new_cell;
+ ListCell *new_cell;
new_cell = (ListCell *) palloc(sizeof(*new_cell));
/* new_cell->data is left undefined! */
ListCell *
lappend_cell(List *list, ListCell *prev, void *datum)
{
- ListCell *new_cell;
+ ListCell *new_cell;
Assert(IsPointerList(list));
ListCell *
lappend_cell_int(List *list, ListCell *prev, int datum)
{
- ListCell *new_cell;
+ ListCell *new_cell;
Assert(IsIntegerList(list));
ListCell *
lappend_cell_oid(List *list, ListCell *prev, Oid datum)
{
- ListCell *new_cell;
+ ListCell *new_cell;
Assert(IsOidList(list));
/*
* Prepend an OID to the list. See lcons()
*/
-List *
+List *
lcons_oid(Oid datum, List *list)
{
Assert(IsOidList(list));
List *
list_truncate(List *list, int new_size)
{
- ListCell *cell;
- int n;
+ ListCell *cell;
+ int n;
if (new_size <= 0)
- return NIL; /* truncate to zero length */
+ return NIL; /* truncate to zero length */
/* If asked to effectively extend the list, do nothing */
if (new_size >= list_length(list))
return list;
n = 1;
- foreach (cell, list)
+ foreach(cell, list)
{
if (n == new_size)
{
static ListCell *
list_nth_cell(List *list, int n)
{
- ListCell *match;
+ ListCell *match;
Assert(list != NIL);
Assert(n >= 0);
bool
list_member(List *list, void *datum)
{
- ListCell *cell;
+ ListCell *cell;
Assert(IsPointerList(list));
check_list_invariants(list);
- foreach (cell, list)
+ foreach(cell, list)
{
if (equal(lfirst(cell), datum))
return true;
bool
list_member_ptr(List *list, void *datum)
{
- ListCell *cell;
+ ListCell *cell;
Assert(IsPointerList(list));
check_list_invariants(list);
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst(cell) == datum)
return true;
bool
list_member_int(List *list, int datum)
{
- ListCell *cell;
+ ListCell *cell;
Assert(IsIntegerList(list));
check_list_invariants(list);
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst_int(cell) == datum)
return true;
bool
list_member_oid(List *list, Oid datum)
{
- ListCell *cell;
+ ListCell *cell;
Assert(IsOidList(list));
check_list_invariants(list);
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst_oid(cell) == datum)
return true;
/*
* Otherwise, adjust the necessary list links, deallocate the
- * particular node we have just removed, and return the list we
- * were given.
+ * particular node we have just removed, and return the list we were
+ * given.
*/
list->length--;
List *
list_delete(List *list, void *datum)
{
- ListCell *cell;
- ListCell *prev;
+ ListCell *cell;
+ ListCell *prev;
Assert(IsPointerList(list));
check_list_invariants(list);
prev = NULL;
- foreach (cell, list)
+ foreach(cell, list)
{
if (equal(lfirst(cell), datum))
return list_delete_cell(list, cell, prev);
List *
list_delete_ptr(List *list, void *datum)
{
- ListCell *cell;
- ListCell *prev;
+ ListCell *cell;
+ ListCell *prev;
Assert(IsPointerList(list));
check_list_invariants(list);
prev = NULL;
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst(cell) == datum)
return list_delete_cell(list, cell, prev);
List *
list_delete_int(List *list, int datum)
{
- ListCell *cell;
- ListCell *prev;
+ ListCell *cell;
+ ListCell *prev;
Assert(IsIntegerList(list));
check_list_invariants(list);
prev = NULL;
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst_int(cell) == datum)
return list_delete_cell(list, cell, prev);
List *
list_delete_oid(List *list, Oid datum)
{
- ListCell *cell;
- ListCell *prev;
+ ListCell *cell;
+ ListCell *prev;
Assert(IsOidList(list));
check_list_invariants(list);
prev = NULL;
- foreach (cell, list)
+ foreach(cell, list)
{
if (lfirst_oid(cell) == datum)
return list_delete_cell(list, cell, prev);
List *
list_union(List *list1, List *list2)
{
- List *result;
- ListCell *cell;
+ List *result;
+ ListCell *cell;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
List *
list_union_ptr(List *list1, List *list2)
{
- List *result;
- ListCell *cell;
+ List *result;
+ ListCell *cell;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
List *
list_union_int(List *list1, List *list2)
{
- List *result;
- ListCell *cell;
+ List *result;
+ ListCell *cell;
Assert(IsIntegerList(list1));
Assert(IsIntegerList(list2));
List *
list_union_oid(List *list1, List *list2)
{
- List *result;
- ListCell *cell;
+ List *result;
+ ListCell *cell;
Assert(IsOidList(list1));
Assert(IsOidList(list2));
List *
list_difference(List *list1, List *list2)
{
- ListCell *cell;
- List *result = NIL;
+ ListCell *cell;
+ List *result = NIL;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
if (list2 == NIL)
return list_copy(list1);
- foreach (cell, list1)
+ foreach(cell, list1)
{
if (!list_member(list2, lfirst(cell)))
result = lappend(result, lfirst(cell));
List *
list_difference_ptr(List *list1, List *list2)
{
- ListCell *cell;
- List *result = NIL;
+ ListCell *cell;
+ List *result = NIL;
Assert(IsPointerList(list1));
Assert(IsPointerList(list2));
if (list2 == NIL)
return list_copy(list1);
- foreach (cell, list1)
+ foreach(cell, list1)
{
if (!list_member_ptr(list2, lfirst(cell)))
result = lappend(result, lfirst(cell));
List *
list_difference_int(List *list1, List *list2)
{
- ListCell *cell;
- List *result = NIL;
+ ListCell *cell;
+ List *result = NIL;
Assert(IsIntegerList(list1));
Assert(IsIntegerList(list2));
if (list2 == NIL)
return list_copy(list1);
- foreach (cell, list1)
+ foreach(cell, list1)
{
if (!list_member_int(list2, lfirst_int(cell)))
result = lappend_int(result, lfirst_int(cell));
List *
list_difference_oid(List *list1, List *list2)
{
- ListCell *cell;
- List *result = NIL;
+ ListCell *cell;
+ List *result = NIL;
Assert(IsOidList(list1));
Assert(IsOidList(list2));
if (list2 == NIL)
return list_copy(list1);
- foreach (cell, list1)
+ foreach(cell, list1)
{
if (!list_member_oid(list2, lfirst_oid(cell)))
result = lappend_oid(result, lfirst_oid(cell));
static void
list_free_private(List *list, bool deep)
{
- ListCell *cell;
+ ListCell *cell;
check_list_invariants(list);
cell = list_head(list);
while (cell != NULL)
{
- ListCell *tmp = cell;
+ ListCell *tmp = cell;
cell = lnext(cell);
if (deep)
List *
list_copy(List *oldlist)
{
- List *newlist;
- ListCell *newlist_prev;
- ListCell *oldlist_cur;
+ List *newlist;
+ ListCell *newlist_prev;
+ ListCell *oldlist_cur;
if (oldlist == NIL)
return NIL;
oldlist_cur = oldlist->head->next;
while (oldlist_cur)
{
- ListCell *newlist_cur;
+ ListCell *newlist_cur;
newlist_cur = (ListCell *) palloc(sizeof(*newlist_cur));
newlist_cur->data = oldlist_cur->data;
List *
list_copy_tail(List *oldlist, int nskip)
{
- List *newlist;
- ListCell *newlist_prev;
- ListCell *oldlist_cur;
+ List *newlist;
+ ListCell *newlist_prev;
+ ListCell *oldlist_cur;
if (nskip < 0)
nskip = 0; /* would it be better to elog? */
oldlist_cur = oldlist_cur->next;
/*
- * Copy over the data in the first remaining cell; new_list() has already
- * allocated the head cell itself
+ * Copy over the data in the first remaining cell; new_list() has
+ * already allocated the head cell itself
*/
newlist->head->data = oldlist_cur->data;
oldlist_cur = oldlist_cur->next;
while (oldlist_cur)
{
- ListCell *newlist_cur;
+ ListCell *newlist_cur;
newlist_cur = (ListCell *) palloc(sizeof(*newlist_cur));
newlist_cur->data = oldlist_cur->data;
{
return l ? l->length : 0;
}
-
-#endif /* ! __GNUC__ */
+#endif /* ! __GNUC__ */
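The list.c hunks above touch most of the List primitives. As a usage-level illustration only (backend code, assuming the 8.0-era pg_list.h API that appears in these hunks: NIL, lappend_int, list_member_int, foreach, lfirst_int, list_free), building an integer list, testing membership and walking it looks roughly like this:

#include "postgres.h"
#include "nodes/pg_list.h"

static int
sum_unique_ints(const int *vals, int nvals)
{
	List	   *seen = NIL;
	ListCell   *lc;
	int			i;
	int			total = 0;

	for (i = 0; i < nvals; i++)
	{
		if (!list_member_int(seen, vals[i]))	/* linear membership test */
			seen = lappend_int(seen, vals[i]);	/* O(1) append via tail ptr */
	}

	foreach(lc, seen)
		total += lfirst_int(lc);

	list_free(seen);			/* frees the cells, not any pointed-to data */
	return total;
}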
/*
* Temporary compatibility functions
* list_length() macro in order to avoid the overhead of a function
* call.
*/
-int length(List *list);
+int length(List *list);
int
length(List *list)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.242 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/outfuncs.c,v 1.243 2004/08/29 05:06:43 momjian Exp $
*
* NOTES
* Every node type that can appear in stored rules' parsetrees *must*
static void
_outList(StringInfo str, List *node)
{
- ListCell *lc;
+ ListCell *lc;
appendStringInfoChar(str, '(');
else if (IsA(node, OidList))
appendStringInfoChar(str, 'o');
- foreach (lc, node)
+ foreach(lc, node)
{
/*
* For the sake of backward compatibility, we emit a slightly
- * different whitespace format for lists of nodes vs. other
- * types of lists. XXX: is this necessary?
+ * different whitespace format for lists of nodes vs. other types
+ * of lists. XXX: is this necessary?
*/
if (IsA(node, List))
{
else if (IsA(node, OidList))
appendStringInfo(str, " %u", lfirst_oid(lc));
else
- elog(ERROR, "unrecognized list node type: %d",
- (int) node->type);
+ elog(ERROR, "unrecognized list node type: %d",
+ (int) node->type);
}
appendStringInfoChar(str, ')');
appendStringInfo(str, "%ld", value->val.ival);
break;
case T_Float:
+
/*
* We assume the value is a valid numeric literal and so does
* not need quoting.
{
if (obj == NULL)
appendStringInfo(str, "<>");
- else if (IsA(obj, List) || IsA(obj, IntList) || IsA(obj, OidList))
+ else if (IsA(obj, List) ||IsA(obj, IntList) || IsA(obj, OidList))
_outList(str, obj);
else if (IsA(obj, Integer) ||
IsA(obj, Float) ||
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.2 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/params.c,v 1.3 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
copyParamList(ParamListInfo from)
{
ParamListInfo retval;
- int i, size;
+ int i,
+ size;
if (from == NULL)
return NULL;
retval = (ParamListInfo) palloc0((size + 1) * sizeof(ParamListInfoData));
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
+ {
/* copy metadata */
retval[i].kind = from[i].kind;
if (from[i].kind == PARAM_NAMED)
retval[i].isnull = from[i].isnull;
if (from[i].isnull)
{
- retval[i].value = from[i].value; /* nulls just copy */
+ retval[i].value = from[i].value; /* nulls just copy */
}
else
{
- int16 typLen;
- bool typByVal;
+ int16 typLen;
+ bool typByVal;
get_typlenbyval(from[i].ptype, &typLen, &typByVal);
retval[i].value = datumCopy(from[i].value, typByVal, typLen);
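The copyParamList hunks copy each parameter's metadata verbatim and deep-copy only non-null values so the copy outlives its source (the real code asks get_typlenbyval how the value is passed and uses datumCopy). A simplified standalone sketch with a hypothetical Param struct:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

typedef struct Param
{
	int			kind;			/* metadata, copied wholesale */
	bool		isnull;
	size_t		len;			/* payload size when not null */
	void	   *value;			/* heap payload owned by the array */
} Param;

/* Return a deep copy of "from" (n entries), or NULL on allocation failure. */
static Param *
copy_params(const Param *from, int n)
{
	Param	   *copy = calloc((size_t) n, sizeof(Param));
	int			i;

	if (copy == NULL)
		return NULL;
	for (i = 0; i < n; i++)
	{
		copy[i] = from[i];		/* kind, isnull, len; nulls just copy */
		if (!from[i].isnull)
		{
			copy[i].value = malloc(from[i].len);
			if (copy[i].value == NULL)
				return NULL;	/* error path kept minimal for the sketch */
			memcpy(copy[i].value, from[i].value, from[i].len);
		}
	}
	return copy;
}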
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.70 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/print.c,v 1.71 2004/08/29 05:06:43 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
j = indentDist - 1;
/* j will equal indentDist on next loop iteration */
/* suppress whitespace just after } */
- while (dump[i+1] == ' ')
+ while (dump[i + 1] == ' ')
i++;
break;
case ')':
/* force line break after ), unless another ) follows */
- if (dump[i+1] != ')')
+ if (dump[i + 1] != ')')
{
line[j + 1] = '\0';
appendStringInfo(&str, "%s\n", line);
j = indentDist - 1;
- while (dump[i+1] == ' ')
+ while (dump[i + 1] == ' ')
i++;
}
break;
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
c->constvalue,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(-1)));
printf("%s", outputstr);
pfree(outputstr);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.44 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/nodes/read.c,v 1.45 2004/08/29 05:06:43 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
pg_strtok_ptr = str; /* point pg_strtok at the string to read */
- retval = nodeRead(NULL, 0); /* do the reading */
+ retval = nodeRead(NULL, 0); /* do the reading */
pg_strtok_ptr = save_strtok;
#define RIGHT_PAREN (1000000 + 1)
#define LEFT_PAREN (1000000 + 2)
#define LEFT_BRACE (1000000 + 3)
-#define OTHER_TOKEN (1000000 + 4)
+#define OTHER_TOKEN (1000000 + 4)
/*
* nodeTokenType -
/* List of integers */
for (;;)
{
- int val;
- char *endptr;
+ int val;
+ char *endptr;
token = pg_strtok(&tok_len);
if (token == NULL)
/* List of OIDs */
for (;;)
{
- Oid val;
- char *endptr;
+ Oid val;
+ char *endptr;
token = pg_strtok(&tok_len);
if (token == NULL)
}
case RIGHT_PAREN:
elog(ERROR, "unexpected right parenthesis");
- result = NULL; /* keep compiler happy */
+ result = NULL; /* keep compiler happy */
break;
case OTHER_TOKEN:
if (tok_len == 0)
}
break;
case T_Integer:
+
/*
* we know that the token terminates on a char atol will stop
* at
}
default:
elog(ERROR, "unrecognized node type: %d", (int) type);
- result = NULL; /* keep compiler happy */
+ result = NULL; /* keep compiler happy */
break;
}
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.70 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_eval.c,v 1.71 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool desirable_join(Query *root,
- RelOptInfo *outer_rel, RelOptInfo *inner_rel);
+ RelOptInfo *outer_rel, RelOptInfo *inner_rel);
/*
* redundant cost calculations, we simply reject tours where tour[0] >
* tour[1], assigning them an artificially bad fitness.
*
- * init_tour() is aware of this rule and so we should never reject a
- * tour during the initial filling of the pool. It seems difficult to
+ * init_tour() is aware of this rule and so we should never reject a tour
+ * during the initial filling of the pool. It seems difficult to
* persuade the recombination logic never to break the rule, however.
*/
if (num_gene >= 2 && tour[0] > tour[1])
/*
* Push each relation onto the stack in the specified order. After
* pushing each relation, see whether the top two stack entries are
- * joinable according to the desirable_join() heuristics. If so,
- * join them into one stack entry, and try again to combine with the
- * next stack entry down (if any). When the stack top is no longer
- * joinable, continue to the next input relation. After we have pushed
- * the last input relation, the heuristics are disabled and we force
- * joining all the remaining stack entries.
+ * joinable according to the desirable_join() heuristics. If so, join
+ * them into one stack entry, and try again to combine with the next
+ * stack entry down (if any). When the stack top is no longer
+ * joinable, continue to the next input relation. After we have
+ * pushed the last input relation, the heuristics are disabled and we
+ * force joining all the remaining stack entries.
*
* If desirable_join() always returns true, this produces a straight
- * left-to-right join just like the old code. Otherwise we may produce
- * a bushy plan or a left/right-sided plan that really corresponds to
- * some tour other than the one given. To the extent that the heuristics
- * are helpful, however, this will be a better plan than the raw tour.
+ * left-to-right join just like the old code. Otherwise we may
+ * produce a bushy plan or a left/right-sided plan that really
+ * corresponds to some tour other than the one given. To the extent
+ * that the heuristics are helpful, however, this will be a better
+ * plan than the raw tour.
*
- * Also, when a join attempt fails (because of IN-clause constraints),
- * we may be able to recover and produce a workable plan, where the old
- * code just had to give up. This case acts the same as a false result
- * from desirable_join().
+ * Also, when a join attempt fails (because of IN-clause constraints), we
+ * may be able to recover and produce a workable plan, where the old
+ * code just had to give up. This case acts the same as a false
+ * result from desirable_join().
*/
for (rel_count = 0; rel_count < num_gene; rel_count++)
{
RelOptInfo *inner_rel = stack[stack_depth - 1];
/*
- * Don't pop if heuristics say not to join now. However,
- * once we have exhausted the input, the heuristics can't
- * prevent popping.
+ * Don't pop if heuristics say not to join now. However, once
+ * we have exhausted the input, the heuristics can't prevent
+ * popping.
*/
if (rel_count < num_gene - 1 &&
!desirable_join(evaldata->root, outer_rel, inner_rel))
break;
/*
- * Construct a RelOptInfo representing the join of these
- * two input relations. These are always inner joins.
- * Note that we expect the joinrel not to exist in
- * root->join_rel_list yet, and so the paths constructed for it
- * will only include the ones we want.
+ * Construct a RelOptInfo representing the join of these two
+ * input relations. These are always inner joins. Note that
+ * we expect the joinrel not to exist in root->join_rel_list
+ * yet, and so the paths constructed for it will only include
+ * the ones we want.
*/
joinrel = make_join_rel(evaldata->root, outer_rel, inner_rel,
JOIN_INNER);
}
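The long comment above describes the stack discipline used to turn a GEQO tour into a join tree: push each relation, and after every push keep merging the top two stack entries while the heuristic approves; once the last relation has been pushed, the heuristic is ignored and everything left is force-merged. A standalone sketch with relations as bitmasks; desirable() here is an arbitrary placeholder for the real join heuristics, and the 64-entry stack is an assumed capacity.

#include <stdbool.h>

typedef unsigned int RelSet;	/* bitmask of base relations (stand-in) */

/* Placeholder heuristic; the real code looks at join clauses and sizes. */
static bool
desirable(RelSet outer, RelSet inner)
{
	return (outer & 1) != 0 || (inner & 1) != 0;
}

/*
 * Collapse a tour (at most 64 relations here) into one joined set.  While
 * input remains, merge the top two stack entries only if the heuristic
 * approves; after the last push, force-merge whatever is left.
 */
static RelSet
build_join(const RelSet *tour, int num_gene)
{
	RelSet		stack[64];
	int			depth = 0;
	int			i;

	if (num_gene <= 0)
		return 0;

	for (i = 0; i < num_gene; i++)
	{
		stack[depth++] = tour[i];

		while (depth >= 2)
		{
			RelSet		outer = stack[depth - 2];
			RelSet		inner = stack[depth - 1];

			/* heuristics may block merging only while input remains */
			if (i < num_gene - 1 && !desirable(outer, inner))
				break;
			stack[depth - 2] = outer | inner;	/* "join" the two entries */
			depth--;
		}
	}
	return stack[0];			/* depth == 1 at this point */
}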
/*
- * Join if the rels are members of the same IN sub-select. This is
- * needed to improve the odds that we will find a valid solution in
- * a case where an IN sub-select has a clauseless join.
+ * Join if the rels are members of the same IN sub-select. This is
+ * needed to improve the odds that we will find a valid solution in a
+ * case where an IN sub-select has a clauseless join.
*/
foreach(l, root->in_info_list)
{
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.46 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_main.c,v 1.47 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
size = pow(2.0, nr_rel + 1.0);
- maxsize = 50 * Geqo_effort; /* 50 to 500 individuals */
+ maxsize = 50 * Geqo_effort; /* 50 to 500 individuals */
if (size > maxsize)
return maxsize;
- minsize = 10 * Geqo_effort; /* 10 to 100 individuals */
+ minsize = 10 * Geqo_effort; /* 10 to 100 individuals */
if (size < minsize)
return minsize;
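The hunk above sizes the GEQO pool as 2^(nr_rel + 1) clamped into [10 * effort, 50 * effort]; for example, six relations at effort 5 give 2^7 = 128, which already lies inside [50, 250]. The same arithmetic as a one-function sketch:

#include <math.h>

/* Clamp the nominal pool size 2^(nr_rel + 1) into [10*effort, 50*effort]. */
static double
pool_size(int nr_rel, int effort)
{
	double		size = pow(2.0, nr_rel + 1.0);
	double		maxsize = 50.0 * effort;
	double		minsize = 10.0 * effort;

	if (size > maxsize)
		return maxsize;
	if (size < minsize)
		return minsize;
	return size;
}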
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.24 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_pool.c,v 1.25 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int bad = 0;
/*
- * We immediately discard any invalid individuals (those that geqo_eval
- * returns DBL_MAX for), thereby not wasting pool space on them.
+ * We immediately discard any invalid individuals (those that
+ * geqo_eval returns DBL_MAX for), thereby not wasting pool space on
+ * them.
*
* If we fail to make any valid individuals after 10000 tries, give up;
* this probably means something is broken, and we shouldn't just let
* geqo_recombination.c
* misc recombination procedures
*
-* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.13 2004/01/23 23:54:21 tgl Exp $
+* $PostgreSQL: pgsql/src/backend/optimizer/geqo/geqo_recombination.c,v 1.14 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/*
- * Since geqo_eval() will reject tours where tour[0] > tour[1],
- * we may as well switch the two to make it a valid tour.
+ * Since geqo_eval() will reject tours where tour[0] > tour[1], we may
+ * as well switch the two to make it a valid tour.
*/
if (num_gene >= 2 && tour[0] > tour[1])
{
- Gene gtmp = tour[0];
+ Gene gtmp = tour[0];
tour[0] = tour[1];
tour[1] = gtmp;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.120 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/allpaths.c,v 1.121 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool qual_is_pushdown_safe(Query *subquery, Index rti, Node *qual,
bool *differentTypes);
static void subquery_push_qual(Query *subquery, List *rtable,
- Index rti, Node *qual);
+ Index rti, Node *qual);
static void recurse_push_qual(Node *setOp, Query *topquery,
- List *rtable, Index rti, Node *qual);
+ List *rtable, Index rti, Node *qual);
/*
static void
set_base_rel_pathlists(Query *root)
{
- ListCell *l;
+ ListCell *l;
foreach(l, root->base_rel_list)
{
check_partial_indexes(root, rel);
/*
- * Check to see if we can extract any restriction conditions from
- * join quals that are OR-of-AND structures. If so, add them to the
- * rel's restriction list, and recompute the size estimates.
+ * Check to see if we can extract any restriction conditions from join
+ * quals that are OR-of-AND structures. If so, add them to the rel's
+ * restriction list, and recompute the size estimates.
*/
if (create_or_index_quals(root, rel))
set_baserel_size_estimates(root, rel);
Var *parentvar = (Var *) lfirst(parentvars);
Var *childvar = (Var *) lfirst(childvars);
- if (IsA(parentvar, Var) && IsA(childvar, Var))
+ if (IsA(parentvar, Var) &&IsA(childvar, Var))
{
int pndx = parentvar->varattno - rel->min_attr;
int cndx = childvar->varattno - childrel->min_attr;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.69 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/clausesel.c,v 1.70 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* See if it looks like a restriction clause with a pseudoconstant
* on one side. (Anything more complicated than that might not
- * behave in the simple way we are expecting.) Most of the tests
+ * behave in the simple way we are expecting.) Most of the tests
* here can be done more efficiently with rinfo than without.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
ok = (bms_membership(rinfo->clause_relids) == BMS_SINGLETON) &&
(is_pseudo_constant_clause_relids(lsecond(expr->args),
- rinfo->right_relids) ||
+ rinfo->right_relids) ||
(varonleft = false,
- is_pseudo_constant_clause_relids(linitial(expr->args),
- rinfo->left_relids)));
+ is_pseudo_constant_clause_relids(linitial(expr->args),
+ rinfo->left_relids)));
}
else
{
{
/*
* If it's not a "<" or ">" operator, just merge the
- * selectivity in generically. But if it's the
- * right oprrest, add the clause to rqlist for later
- * processing.
+ * selectivity in generically. But if it's the right
+ * oprrest, add the clause to rqlist for later processing.
*/
switch (get_oprrest(expr->opno))
{
rinfo = (RestrictInfo *) clause;
/*
- * If possible, cache the result of the selectivity calculation for
- * the clause. We can cache if varRelid is zero or the clause
- * contains only vars of that relid --- otherwise varRelid will affect
- * the result, so mustn't cache. We also have to be careful about
- * the jointype. It's OK to cache when jointype is JOIN_INNER or
- * one of the outer join types (any given outer-join clause should
- * always be examined with the same jointype, so result won't change).
- * It's not OK to cache when jointype is one of the special types
- * associated with IN processing, because the same clause may be
- * examined with different jointypes and the result should vary.
+ * If possible, cache the result of the selectivity calculation
+ * for the clause. We can cache if varRelid is zero or the clause
+ * contains only vars of that relid --- otherwise varRelid will
+ * affect the result, so mustn't cache. We also have to be
+ * careful about the jointype. It's OK to cache when jointype is
+ * JOIN_INNER or one of the outer join types (any given outer-join
+ * clause should always be examined with the same jointype, so
+ * result won't change). It's not OK to cache when jointype is one
+ * of the special types associated with IN processing, because the
+ * same clause may be examined with different jointypes and the
+ * result should vary.
*/
if (varRelid == 0 ||
bms_is_subset_singleton(rinfo->clause_relids, varRelid))
s1 = restriction_selectivity(root,
BooleanEqualOperator,
list_make2(var,
- makeBoolConst(true,
+ makeBoolConst(true,
false)),
varRelid);
}
else if (IsA(clause, Param))
{
/* see if we can replace the Param */
- Node *subst = estimate_expression_value(clause);
+ Node *subst = estimate_expression_value(clause);
if (IsA(subst, Const))
{
else if (or_clause(clause))
{
/*
- * Selectivities for an OR clause are computed as s1+s2 - s1*s2
- * to account for the probable overlap of selected tuple sets.
+ * Selectivities for an OR clause are computed as s1+s2 - s1*s2 to
+ * account for the probable overlap of selected tuple sets.
*
* XXX is this too conservative?
*/
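The s1+s2 - s1*s2 rule above is just inclusion-exclusion under an independence assumption. A minimal standalone sketch with invented selectivities (not values produced by clause_selectivity()) shows the arithmetic:

#include <stdio.h>

/*
 * Combine two clause selectivities as the comment above describes,
 * treating them as independent probabilities:
 *     P(A or B) = P(A) + P(B) - P(A) * P(B)
 */
static double
or_selec(double s1, double s2)
{
    return s1 + s2 - s1 * s2;
}

int
main(void)
{
    double  s1 = 0.20;          /* hypothetical selectivity of clause A */
    double  s2 = 0.10;          /* hypothetical selectivity of clause B */

    printf("OR selectivity = %.3f\n", or_selec(s1, s2));    /* 0.280 */
    return 0;
}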
{
/*
* Otherwise, it's a join if there's more than one relation
- * used. We can optimize this calculation if an rinfo was passed.
+ * used. We can optimize this calculation if an rinfo was
+ * passed.
*/
if (rinfo)
is_join_clause = (bms_membership(rinfo->clause_relids) ==
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.133 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/costsize.c,v 1.134 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Selectivity joininfactor;
/*
- * If inner path is an indexscan, be sure to use its estimated output row
- * count, which may be lower than the restriction-clause-only row count of
- * its parent. (We don't include this case in the PATH_ROWS macro because
- * it applies *only* to a nestloop's inner relation.)
+ * If inner path is an indexscan, be sure to use its estimated output
+ * row count, which may be lower than the restriction-clause-only row
+ * count of its parent. (We don't include this case in the PATH_ROWS
+ * macro because it applies *only* to a nestloop's inner relation.)
*/
if (IsA(inner_path, IndexPath))
inner_path_rows = ((IndexPath *) inner_path)->rows;
* If we're doing JOIN_IN then we will stop scanning inner tuples for
* an outer tuple as soon as we have one match. Account for the
* effects of this by scaling down the cost estimates in proportion to
- * the JOIN_IN selectivity. (This assumes that all the quals
- * attached to the join are IN quals, which should be true.)
+ * the JOIN_IN selectivity. (This assumes that all the quals attached
+ * to the join are IN quals, which should be true.)
*/
joininfactor = join_in_selectivity(path, root);
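As a rough illustration of the scaling described above, here is a sketch with invented numbers (not the actual cost_mergejoin() arithmetic): if only a quarter of the matching inner tuples are scanned on average before the first match stops the scan, the inner-scan part of the cost shrinks by the same factor.

#include <stdio.h>

int
main(void)
{
    /* All figures below are invented, for illustration only. */
    double  outer_rows = 1000.0;
    double  inner_tuples_per_outer = 20.0;  /* scanned without early stop */
    double  cost_per_inner_tuple = 0.01;
    double  joininfactor = 0.25;    /* fraction scanned before first match */
    double  full_cost;
    double  scaled_cost;

    full_cost = outer_rows * inner_tuples_per_outer * cost_per_inner_tuple;
    scaled_cost = full_cost * joininfactor;

    printf("full %.2f, scaled for JOIN_IN %.2f\n", full_cost, scaled_cost);
    return 0;
}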
if (mergeclauses)
{
firstclause = (RestrictInfo *) linitial(mergeclauses);
- if (firstclause->left_mergescansel < 0) /* not computed yet? */
+ if (firstclause->left_mergescansel < 0) /* not computed yet? */
mergejoinscansel(root, (Node *) firstclause->clause,
&firstclause->left_mergescansel,
&firstclause->right_mergescansel);
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_rightop(restrictinfo->clause),
+ get_rightop(restrictinfo->clause),
virtualbuckets);
restrictinfo->right_bucketsize = thisbucketsize;
}
/* not cached yet */
thisbucketsize =
estimate_hash_bucketsize(root,
- get_leftop(restrictinfo->clause),
+ get_leftop(restrictinfo->clause),
virtualbuckets);
restrictinfo->left_bucketsize = thisbucketsize;
}
return 1.0;
/*
- * Return 1.0 if the inner side is already known unique. The case where
- * the inner path is already a UniquePath probably cannot happen in
- * current usage, but check it anyway for completeness. The interesting
- * case is where we've determined the inner relation itself is unique,
- * which we can check by looking at the rows estimate for its UniquePath.
+ * Return 1.0 if the inner side is already known unique. The case
+ * where the inner path is already a UniquePath probably cannot happen
+ * in current usage, but check it anyway for completeness. The
+ * interesting case is where we've determined the inner relation
+ * itself is unique, which we can check by looking at the rows
+ * estimate for its UniquePath.
*/
if (IsA(path->innerjoinpath, UniquePath))
return 1.0;
return 1.0;
/*
- * Compute same result set_joinrel_size_estimates would compute
- * for JOIN_INNER. Note that we use the input rels' absolute size
- * estimates, not PATH_ROWS() which might be less; if we used PATH_ROWS()
- * we'd be double-counting the effects of any join clauses used in
- * input scans.
+ * Compute same result set_joinrel_size_estimates would compute for
+ * JOIN_INNER. Note that we use the input rels' absolute size
+ * estimates, not PATH_ROWS() which might be less; if we used
+ * PATH_ROWS() we'd be double-counting the effects of any join clauses
+ * used in input scans.
*/
selec = clauselist_selectivity(root,
path->joinrestrictinfo,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.163 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/indxpath.c,v 1.164 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Relids outer_relids,
JoinType jointype, bool isouterjoin);
static bool match_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass,
- RestrictInfo *rinfo);
+ int indexcol, Oid opclass,
+ RestrictInfo *rinfo);
static bool match_join_clause_to_indexcol(RelOptInfo *rel, IndexOptInfo *index,
- int indexcol, Oid opclass,
- RestrictInfo *rinfo);
+ int indexcol, Oid opclass,
+ RestrictInfo *rinfo);
static Oid indexable_operator(Expr *clause, Oid opclass,
bool indexkey_on_left);
static bool pred_test(List *predicate_list, List *restrictinfo_list);
continue;
/*
- * 1. Match the index against non-OR restriction clauses.
- * (OR clauses will be considered later by orindxpath.c.)
+ * 1. Match the index against non-OR restriction clauses. (OR
+ * clauses will be considered later by orindxpath.c.)
*/
restrictclauses = group_clauses_by_indexkey(rel, index);
ListCell *l;
/*
- * We can always use plain restriction clauses for the rel. We scan
- * these first because we want them first in the clausegroup list
- * for the convenience of remove_redundant_join_clauses, which can
- * never remove non-join clauses and hence won't be able to get rid
- * of a non-join clause if it appears after a join clause it is
- * redundant with.
+ * We can always use plain restriction clauses for the rel. We
+ * scan these first because we want them first in the clausegroup
+ * list for the convenience of remove_redundant_join_clauses,
+ * which can never remove non-join clauses and hence won't be able
+ * to get rid of a non-join clause if it appears after a join
+ * clause it is redundant with.
*/
foreach(l, rel->baserestrictinfo)
{
}
/*
- * If we found clauses in more than one list, we may now have clauses
- * that are known redundant. Get rid of 'em.
+ * If we found clauses in more than one list, we may now have
+ * clauses that are known redundant. Get rid of 'em.
*/
if (numsources > 1)
{
* top-level restriction clauses of the relation. Furthermore, we demand
* that at least one such use be made, otherwise we fail and return NIL.
* (Any path we made without such a use would be redundant with non-OR
- * indexscans. Compare also group_clauses_by_indexkey_for_join.)
+ * indexscans. Compare also group_clauses_by_indexkey_for_join.)
*
* XXX When we generate an indexqual list that uses both the OR subclause
* and top-level restriction clauses, we end up with a slightly inefficient
* If we found no clauses for this indexkey in the OR subclause
* itself, try looking in the rel's top-level restriction list.
*
- * XXX should we always search the top-level list? Slower but
- * could sometimes yield a better plan.
+ * XXX should we always search the top-level list? Slower but could
+ * sometimes yield a better plan.
*/
if (clausegroup == NIL)
{
*
* The strategy numbers defined by btree indexes (see access/skey.h) are:
* (1) < (2) <= (3) = (4) >= (5) >
- * and in addition we use (6) to represent <>. <> is not a btree-indexable
+ * and in addition we use (6) to represent <>. <> is not a btree-indexable
* operator, but we assume here that if the equality operator of a btree
* opclass has a negator operator, the negator behaves as <> for the opclass.
*
/*
* The target operator:
*
- * LT LE EQ GE GT NE
+ * LT LE EQ GE GT NE
*/
- {BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
- {BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
+ {BTGE, BTGE, 0, 0, 0, BTGE}, /* LT */
+ {BTGT, BTGE, 0, 0, 0, BTGT}, /* LE */
{BTGT, BTGE, BTEQ, BTLE, BTLT, BTNE}, /* EQ */
- { 0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
- { 0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
- { 0, 0, 0, 0, 0, BTEQ} /* NE */
+ {0, 0, 0, BTLE, BTLT, BTLT}, /* GE */
+ {0, 0, 0, BTLE, BTLE, BTLE}, /* GT */
+ {0, 0, 0, 0, 0, BTEQ} /* NE */
};
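To make the table's use concrete, here is a toy, integer-only restatement. The enum values and helper names are local stand-ins, not the real btree strategy constants, and the convention is the one the surrounding comments appear to describe: to ask whether "x given_op c1" implies "x target_op c2", fetch the entry at row given_op, column target_op, and evaluate it as "c2 test_op c1".

#include <stdio.h>
#include <stdbool.h>

enum
{
    LT = 1, LE, EQ, GE, GT, NE      /* local stand-ins for strategy numbers */
};

static const int implic[6][6] = {
    /* target op:  LT  LE  EQ  GE  GT  NE        given op: */
    {GE, GE, 0, 0, 0, GE},          /* LT */
    {GT, GE, 0, 0, 0, GT},          /* LE */
    {GT, GE, EQ, LE, LT, NE},       /* EQ */
    {0, 0, 0, LE, LT, LT},          /* GE */
    {0, 0, 0, LE, LE, LE},          /* GT */
    {0, 0, 0, 0, 0, EQ}             /* NE */
};

static bool
apply_op(int op, long a, long b)
{
    switch (op)
    {
        case LT: return a < b;
        case LE: return a <= b;
        case EQ: return a == b;
        case GE: return a >= b;
        case GT: return a > b;
        case NE: return a != b;
    }
    return false;
}

/* Does "x given_op given_const" provably imply "x target_op target_const"? */
static bool
implies(int given_op, long given_const, int target_op, long target_const)
{
    int     test_op = implic[given_op - 1][target_op - 1];

    return test_op != 0 && apply_op(test_op, target_const, given_const);
}

int
main(void)
{
    /* "x < 5" implies "x <= 9" because 9 >= 5 */
    printf("%d\n", (int) implies(LT, 5, LE, 9));    /* 1 */
    /* "x > 5" cannot be shown to imply "x > 7" */
    printf("%d\n", (int) implies(GT, 5, GT, 7));    /* 0 */
    return 0;
}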
* implies another:
*
* A simple and general way is to see if they are equal(); this works for any
- * kind of expression. (Actually, there is an implied assumption that the
+ * kind of expression. (Actually, there is an implied assumption that the
* functions in the expression are immutable, ie dependent only on their input
* arguments --- but this was checked for the predicate by CheckPredicate().)
*
* When the predicate is of the form "foo IS NOT NULL", we can conclude that
* the predicate is implied if the clause is a strict operator or function
- * that has "foo" as an input. In this case the clause must yield NULL when
+ * that has "foo" as an input. In this case the clause must yield NULL when
* "foo" is NULL, which we can take as equivalent to FALSE because we know
* we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is
* already known immutable, so the clause will certainly always fail.)
*
* Our other way works only for binary boolean opclauses of the form
- * "foo op constant", where "foo" is the same in both clauses. The operators
+ * "foo op constant", where "foo" is the same in both clauses. The operators
* and constants can be different but the operators must be in the same btree
- * operator class. We use the above operator implication table to be able to
+ * operator class. We use the above operator implication table to be able to
* derive implications between nonidentical clauses. (Note: "foo" is known
* immutable, and constants are surely immutable, but we have to check that
* the operators are too. As of 8.0 it's possible for opclasses to contain
if (predicate && IsA(predicate, NullTest) &&
((NullTest *) predicate)->nulltesttype == IS_NOT_NULL)
{
- Expr *nonnullarg = ((NullTest *) predicate)->arg;
+ Expr *nonnullarg = ((NullTest *) predicate)->arg;
if (is_opclause(clause) &&
list_member(((OpExpr *) clause)->args, nonnullarg) &&
/*
* Can't do anything more unless they are both binary opclauses with a
* Const on one side, and identical subexpressions on the other sides.
- * Note we don't have to think about binary relabeling of the Const node,
- * since that would have been folded right into the Const.
+ * Note we don't have to think about binary relabeling of the Const
+ * node, since that would have been folded right into the Const.
*
* If either Const is null, we also fail right away; this assumes that
* the test operator will always be strict.
return false;
/*
- * Check for matching subexpressions on the non-Const sides. We used to
- * only allow a simple Var, but it's about as easy to allow any
- * expression. Remember we already know that the pred expression does
+ * Check for matching subexpressions on the non-Const sides. We used
+ * to only allow a simple Var, but it's about as easy to allow any
+ * expression. Remember we already know that the pred expression does
* not contain any non-immutable functions, so identical expressions
* should yield identical results.
*/
return false;
/*
- * Okay, get the operators in the two clauses we're comparing.
- * Commute them if needed so that we can assume the variables are
- * on the left.
+ * Okay, get the operators in the two clauses we're comparing. Commute
+ * them if needed so that we can assume the variables are on the left.
*/
pred_op = ((OpExpr *) predicate)->opno;
if (!pred_var_on_left)
*
* We must find a btree opclass that contains both operators, else the
* implication can't be determined. Also, the pred_op has to be of
- * default subtype (implying left and right input datatypes are the same);
- * otherwise it's unsafe to put the pred_const on the left side of the
- * test. Also, the opclass must contain a suitable test operator
- * matching the clause_const's type (which we take to mean that it has
- * the same subtype as the original clause_operator).
+ * default subtype (implying left and right input datatypes are the
+ * same); otherwise it's unsafe to put the pred_const on the left side
+ * of the test. Also, the opclass must contain a suitable test
+ * operator matching the clause_const's type (which we take to mean
+ * that it has the same subtype as the original clause_operator).
*
* If there are multiple matching opclasses, assume we can use any one to
- * determine the logical relationship of the two operators and the correct
- * corresponding test operator. This should work for any logically
- * consistent opclasses.
+ * determine the logical relationship of the two operators and the
+ * correct corresponding test operator. This should work for any
+ * logically consistent opclasses.
*/
catlist = SearchSysCacheList(AMOPOPID, 1,
ObjectIdGetDatum(pred_op),
pred_op_negated = true;
ReleaseSysCacheList(catlist);
catlist = SearchSysCacheList(AMOPOPID, 1,
- ObjectIdGetDatum(pred_op_negator),
+ ObjectIdGetDatum(pred_op_negator),
0, 0, 0);
}
}
}
/*
- * From the same opclass, find a strategy number for the clause_op,
- * if possible
+ * From the same opclass, find a strategy number for the
+ * clause_op, if possible
*/
clause_tuple = SearchSysCache(AMOPOPID,
ObjectIdGetDatum(clause_op),
else if (OidIsValid(clause_op_negator))
{
clause_tuple = SearchSysCache(AMOPOPID,
- ObjectIdGetDatum(clause_op_negator),
+ ObjectIdGetDatum(clause_op_negator),
ObjectIdGetDatum(opclass_id),
0, 0);
if (HeapTupleIsValid(clause_tuple))
/*
* Last check: test_op must be immutable.
*
- * Note that we require only the test_op to be immutable, not
- * the original clause_op. (pred_op must be immutable, else it
+ * Note that we require only the test_op to be immutable, not the
+ * original clause_op. (pred_op must be immutable, else it
* would not be allowed in an index predicate.) Essentially
* we are assuming that the opclass is consistent even if it
* contains operators that are merely stable.
/* And execute it. */
test_result = ExecEvalExprSwitchContext(test_exprstate,
- GetPerTupleExprContext(estate),
+ GetPerTupleExprContext(estate),
&isNull, NULL);
/* Get back to outer memory context */
ListCell *l;
foreach(l, clausegroups)
- {
allclauses = list_concat(allclauses, list_copy((List *) lfirst(l)));
- }
return allclauses;
}
foreach(orlist, indexclauses)
{
- List *andlist = (List *) lfirst(orlist);
+ List *andlist = (List *) lfirst(orlist);
/* Strip RestrictInfos */
andlist = get_actual_clauses(andlist);
* (The latter is not depended on by any part of the planner, so far as I can
* tell; but some parts of the executor do assume that the indxqual list
* ultimately delivered to the executor is so ordered. One such place is
- * _bt_preprocess_keys() in the btree support. Perhaps that ought to be fixed
+ * _bt_preprocess_keys() in the btree support. Perhaps that ought to be fixed
* someday --- tgl 7/00)
*/
List *
resultquals = list_concat(resultquals,
expand_indexqual_condition(rinfo,
- curClass));
+ curClass));
}
clausegroup_item = lnext(clausegroup_item);
expand_indexqual_condition(RestrictInfo *rinfo, Oid opclass)
{
Expr *clause = rinfo->clause;
+
/* we know these will succeed */
Node *leftop = get_leftop(clause);
Node *rightop = get_rightop(clause);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.89 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinpath.c,v 1.90 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Forget it if can't use all the clauses in right/full join */
if (useallclauses &&
- list_length(cur_mergeclauses) != list_length(mergeclause_list))
+ list_length(cur_mergeclauses) != list_length(mergeclause_list))
continue;
/*
/*
* Done with this outer path if no chance for a mergejoin.
*
- * Special corner case: for "x FULL JOIN y ON true", there will be
- * no join clauses at all. Ordinarily we'd generate a clauseless
+ * Special corner case: for "x FULL JOIN y ON true", there will be no
+ * join clauses at all. Ordinarily we'd generate a clauseless
* nestloop path, but since mergejoin is our only join type that
* supports FULL JOIN, it's necessary to generate a clauseless
* mergejoin path instead.
if (mergeclauses == NIL)
{
if (jointype == JOIN_FULL && restrictlist == NIL)
- /* okay to try for mergejoin */ ;
+ /* okay to try for mergejoin */ ;
else
continue;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.70 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/joinrels.c,v 1.71 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
other_rels = lnext(r); /* only consider remaining initial
* rels */
else
- other_rels = list_head(joinrels[1]); /* consider all initial rels */
+ other_rels = list_head(joinrels[1]); /* consider all initial
+ * rels */
if (old_rel->joininfo != NIL)
{
new_rels = make_rels_by_clause_joins(root,
old_rel,
other_rels);
+
/*
- * An exception occurs when there is a clauseless join inside an
- * IN (sub-SELECT) construct. Here, the members of the subselect
- * all have join clauses (against the stuff outside the IN), but
- * they *must* be joined to each other before we can make use of
- * those join clauses. So do the clauseless join bit.
+ * An exception occurs when there is a clauseless join inside
+ * an IN (sub-SELECT) construct. Here, the members of the
+ * subselect all have join clauses (against the stuff outside
+ * the IN), but they *must* be joined to each other before we
+ * can make use of those join clauses. So do the clauseless
+ * join bit.
*
* See also the last-ditch case below.
*/
other_rels = lnext(r); /* only consider remaining initial
* rels */
else
- other_rels = list_head(joinrels[1]); /* consider all initial
- * rels */
+ other_rels = list_head(joinrels[1]); /* consider all initial
+ * rels */
new_rels = make_rels_by_clauseless_joins(root,
old_rel,
/*----------
* When IN clauses are involved, there may be no legal way to make
- * an N-way join for some values of N. For example consider
+ * an N-way join for some values of N. For example consider
*
* SELECT ... FROM t1 WHERE
- * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
- * y IN (SELECT ... FROM t4,t5 WHERE ...)
+ * x IN (SELECT ... FROM t2,t3 WHERE ...) AND
+ * y IN (SELECT ... FROM t4,t5 WHERE ...)
*
* We will flatten this query to a 5-way join problem, but there are
* no 4-way joins that make_join_rel() will consider legal. We have
/*
* This IN clause is not relevant unless its RHS overlaps the
- * proposed join. (Check this first as a fast path for dismissing
- * most irrelevant INs quickly.)
+ * proposed join. (Check this first as a fast path for
+ * dismissing most irrelevant INs quickly.)
*/
if (!bms_overlap(ininfo->righthand, joinrelids))
continue;
* some other rel(s).
*
* If we already joined IN's RHS to any other rels in either
- * input path, then this join is not constrained (the necessary
- * work was done at the lower level where that join occurred).
+ * input path, then this join is not constrained (the
+ * necessary work was done at the lower level where that join
+ * occurred).
*/
if (bms_is_subset(ininfo->righthand, rel1->relids) &&
!bms_equal(ininfo->righthand, rel1->relids))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.61 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/orindxpath.c,v 1.62 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static IndexPath *best_or_subclause_indexes(Query *root, RelOptInfo *rel,
- List *subclauses);
+ List *subclauses);
static bool best_or_subclause_index(Query *root,
RelOptInfo *rel,
Expr *subclause,
*
* The added quals are partially redundant with the original OR, and therefore
* will cause the size of the joinrel to be underestimated when it is finally
- * formed. (This would be true of a full transformation to CNF as well; the
+ * formed. (This would be true of a full transformation to CNF as well; the
* fault is not really in the transformation, but in clauselist_selectivity's
* inability to recognize redundant conditions.) To minimize the collateral
* damage, we want to minimize the number of quals added. Therefore we do
* it is finally formed. This is a MAJOR HACK: it depends on the fact
* that clause selectivities are cached and on the fact that the same
* RestrictInfo node will appear in every joininfo list that might be used
- * when the joinrel is formed. And it probably isn't right in cases where
+ * when the joinrel is formed. And it probably isn't right in cases where
* the size estimation is nonlinear (i.e., outer and IN joins). But it
* beats not doing anything.
*
ListCell *i;
/*
- * We use the best_or_subclause_indexes() machinery to locate the
- * best combination of restriction subclauses. Note we must ignore
- * any joinclauses that are not marked valid_everywhere, because they
+ * We use the best_or_subclause_indexes() machinery to locate the best
+ * combination of restriction subclauses. Note we must ignore any
+ * joinclauses that are not marked valid_everywhere, because they
* cannot be pushed down due to outer-join rules.
*/
foreach(i, rel->joininfo)
pathnode = best_or_subclause_indexes(root,
rel,
- ((BoolExpr *) rinfo->orclause)->args);
+ ((BoolExpr *) rinfo->orclause)->args);
if (pathnode)
{
if (bestpath == NULL ||
- pathnode->path.total_cost < bestpath->path.total_cost)
+ pathnode->path.total_cost < bestpath->path.total_cost)
{
bestpath = pathnode;
bestrinfo = rinfo;
return false;
/*
- * Convert the indexclauses structure to a RestrictInfo tree,
- * and add it to the rel's restriction list.
+ * Convert the indexclauses structure to a RestrictInfo tree, and add
+ * it to the rel's restriction list.
*/
newrinfos = make_restrictinfo_from_indexclauses(bestpath->indexclauses,
true, true);
* Adjust the original OR clause's cached selectivity to compensate
* for the selectivity of the added (but redundant) lower-level qual.
* This should result in the join rel getting approximately the same
- * rows estimate as it would have gotten without all these shenanigans.
- * (XXX major hack alert ... this depends on the assumption that the
- * selectivity will stay cached ...)
+ * rows estimate as it would have gotten without all these
+ * shenanigans. (XXX major hack alert ... this depends on the
+ * assumption that the selectivity will stay cached ...)
*/
or_selec = clause_selectivity(root, (Node *) or_rinfo,
0, JOIN_INNER);
ListCell *l;
/*
- * Check each restriction clause to see if it is an OR clause, and if so,
- * try to make a path using it.
+ * Check each restriction clause to see if it is an OR clause, and if
+ * so, try to make a path using it.
*/
foreach(l, rel->baserestrictinfo)
{
pathnode = best_or_subclause_indexes(root,
rel,
- ((BoolExpr *) rinfo->orclause)->args);
+ ((BoolExpr *) rinfo->orclause)->args);
if (pathnode)
add_path(rel, (Path *) pathnode);
if (!best_or_subclause_index(root, rel, subclause,
&best_indexinfo,
&best_indexclauses, &best_indexquals,
- &best_startup_cost, &best_total_cost))
+ &best_startup_cost, &best_total_cost))
return NULL; /* failed to match this subclause */
infos = lappend(infos, best_indexinfo);
clauses = lappend(clauses, best_indexclauses);
quals = lappend(quals, best_indexquals);
+
/*
- * Path startup_cost is the startup cost for the first index scan only;
- * startup costs for later scans will be paid later on, so they just
- * get reflected in total_cost.
+ * Path startup_cost is the startup cost for the first index scan
+ * only; startup costs for later scans will be paid later on, so
+ * they just get reflected in total_cost.
*
* Total cost is sum of the per-scan costs.
*/
- if (slist == list_head(subclauses)) /* first scan? */
+ if (slist == list_head(subclauses)) /* first scan? */
path_startup_cost = best_startup_cost;
path_total_cost += best_total_cost;
}
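The cost bookkeeping above can be shown with a minimal sketch using made-up per-scan costs (nothing that cost_index() would actually produce): only the first scan contributes to the path's startup cost, while the totals simply accumulate.

#include <stdio.h>

int
main(void)
{
    /* Invented costs for the index scans of three OR subclauses. */
    double  scan_startup[] = {2.5, 2.5, 2.5};
    double  scan_total[] = {25.0, 40.0, 15.0};
    int     nscans = 3;
    double  path_startup_cost = 0.0;
    double  path_total_cost = 0.0;
    int     i;

    for (i = 0; i < nscans; i++)
    {
        if (i == 0)                 /* first scan? */
            path_startup_cost = scan_startup[i];
        path_total_cost += scan_total[i];
    }
    printf("startup %.1f, total %.1f\n", path_startup_cost, path_total_cost);
    return 0;
}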
/*
* This is an IndexScan, but the overall result will consist of tuples
- * extracted in multiple passes (one for each subclause of the OR),
- * so the result cannot be claimed to have any particular ordering.
+ * extracted in multiple passes (one for each subclause of the OR), so
+ * the result cannot be claimed to have any particular ordering.
*/
pathnode->path.pathkeys = NIL;
RelOptInfo *rel,
Expr *subclause,
IndexOptInfo **retIndexInfo, /* return value */
- List **retIndexClauses, /* return value */
+ List **retIndexClauses, /* return value */
List **retIndexQuals, /* return value */
Cost *retStartupCost, /* return value */
Cost *retTotalCost) /* return value */
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.61 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/path/pathkeys.c,v 1.62 2004/08/29 05:06:43 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Some callers pass expressions that are not necessarily of the same
- * type as the sort operator expects as input (for example when dealing
- * with an index that uses binary-compatible operators). We must relabel
- * these with the correct type so that the key expressions will be seen
- * as equal() to expressions that have been correctly labeled.
+ * type as the sort operator expects as input (for example when
+ * dealing with an index that uses binary-compatible operators). We
+ * must relabel these with the correct type so that the key
+ * expressions will be seen as equal() to expressions that have been
+ * correctly labeled.
*/
if (checkType)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.173 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/createplan.c,v 1.174 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Material *create_material_plan(Query *root, MaterialPath *best_path);
static Plan *create_unique_plan(Query *root, UniquePath *best_path);
static SeqScan *create_seqscan_plan(Query *root, Path *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static IndexScan *create_indexscan_plan(Query *root, IndexPath *best_path,
List *tlist, List *scan_clauses);
static TidScan *create_tidscan_plan(Query *root, TidPath *best_path,
- List *tlist, List *scan_clauses);
+ List *tlist, List *scan_clauses);
static SubqueryScan *create_subqueryscan_plan(Query *root, Path *best_path,
List *tlist, List *scan_clauses);
static FunctionScan *create_functionscan_plan(Query *root, Path *best_path,
* If this is an innerjoin scan, the indexclauses will contain join
* clauses that are not present in scan_clauses (since the passed-in
* value is just the rel's baserestrictinfo list). We must add these
- * clauses to scan_clauses to ensure they get checked. In most cases
+ * clauses to scan_clauses to ensure they get checked. In most cases
* we will remove the join clauses again below, but if a join clause
* contains a special operator, we need to make sure it gets into the
* scan_clauses.
{
/*
* We don't currently support OR indexscans in joins, so we only
- * need to worry about the plain AND case. Also, pointer comparison
- * should be enough to determine RestrictInfo matches.
+ * need to worry about the plain AND case. Also, pointer
+ * comparison should be enough to determine RestrictInfo matches.
*/
Assert(list_length(best_path->indexclauses) == 1);
scan_clauses = list_union_ptr(scan_clauses,
- (List *) linitial(best_path->indexclauses));
+ (List *) linitial(best_path->indexclauses));
}
/* Reduce RestrictInfo list to bare expressions */
stripped_indxquals = NIL;
foreach(l, indxquals)
{
- List *andlist = (List *) lfirst(l);
+ List *andlist = (List *) lfirst(l);
stripped_indxquals = lappend(stripped_indxquals,
get_actual_clauses(andlist));
/*
* The qpqual list must contain all restrictions not automatically
- * handled by the index. All the predicates in the indexquals will
- * be checked (either by the index itself, or by nodeIndexscan.c), but
- * if there are any "special" operators involved then they must be
- * added to qpqual. The upshot is that qpquals must contain scan_clauses
+ * handled by the index. All the predicates in the indexquals will be
+ * checked (either by the index itself, or by nodeIndexscan.c), but if
+ * there are any "special" operators involved then they must be added
+ * to qpqual. The upshot is that qpquals must contain scan_clauses
* minus whatever appears in indxquals.
*/
if (list_length(indxquals) > 1)
/*
* Build an expression representation of the indexqual, expanding
* the implicit OR and AND semantics of the first- and
- * second-level lists. (The odds that this will exactly match any
+ * second-level lists. (The odds that this will exactly match any
* scan_clause are not great; perhaps we need more smarts here.)
*/
indxqual_or_expr = make_expr_from_indexclauses(indxquals);
Relids baserelids = index_path->path.parent->relids;
int baserelid = index_path->path.parent->relid;
List *index_info = index_path->indexinfo;
- ListCell *iq, *ii;
+ ListCell *iq,
+ *ii;
*fixed_indexquals = NIL;
*indxstrategy = NIL;
*
* For each qual clause, commute if needed to put the indexkey operand on the
* left, and then fix its varattno. (We do not need to change the other side
- * of the clause.) Then determine the operator's strategy number and subtype
+ * of the clause.) Then determine the operator's strategy number and subtype
* number, and check for lossy index behavior.
*
* Returns four lists:
Assert(IsA(rinfo, RestrictInfo));
clause = (OpExpr *) rinfo->clause;
- if (!IsA(clause, OpExpr) || list_length(clause->args) != 2)
+ if (!IsA(clause, OpExpr) || list_length(clause->args) != 2)
elog(ERROR, "indexqual clause is not binary opclause");
/*
* indexkey operand as needed, and get the index opclass.
*/
linitial(newclause->args) = fix_indxqual_operand(linitial(newclause->args),
- baserelid,
- index,
- &opclass);
+ baserelid,
+ index,
+ &opclass);
*fixed_quals = lappend(*fixed_quals, newclause);
/*
- * Look up the (possibly commuted) operator in the operator class to
- * get its strategy numbers and the recheck indicator. This also
- * double-checks that we found an operator matching the index.
+ * Look up the (possibly commuted) operator in the operator class
+ * to get its strategy numbers and the recheck indicator. This
+ * also double-checks that we found an operator matching the
+ * index.
*/
get_op_opclass_properties(newclause->opno, opclass,
&stratno, &stratsubtype, &recheck);
{
Plan *subplan = (Plan *) lfirst(subnode);
- if (subnode == list_head(appendplans)) /* first node? */
+ if (subnode == list_head(appendplans)) /* first node? */
plan->startup_cost = subplan->startup_cost;
plan->total_cost += subplan->total_cost;
plan->plan_rows += subplan->plan_rows;
AttrNumber *sortColIdx;
Oid *sortOperators;
- /* We will need at most list_length(pathkeys) sort columns; possibly less */
+ /*
+ * We will need at most list_length(pathkeys) sort columns; possibly
+ * less
+ */
numsortkeys = list_length(pathkeys);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
/* No matching Var; look for a computable expression */
foreach(j, keysublist)
{
- List *exprvars;
- ListCell *k;
+ List *exprvars;
+ ListCell *k;
pathkey = (PathKeyItem *) lfirst(j);
exprvars = pull_var_clause(pathkey->key, false);
AttrNumber *sortColIdx;
Oid *sortOperators;
- /* We will need at most list_length(sortcls) sort columns; possibly less */
+ /*
+ * We will need at most list_length(sortcls) sort columns; possibly
+ * less
+ */
numsortkeys = list_length(sortcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
AttrNumber *sortColIdx;
Oid *sortOperators;
- /* We will need at most list_length(groupcls) sort columns; possibly less */
+ /*
+ * We will need at most list_length(groupcls) sort columns; possibly
+ * less
+ */
numsortkeys = list_length(groupcls);
sortColIdx = (AttrNumber *) palloc(numsortkeys * sizeof(AttrNumber));
sortOperators = (Oid *) palloc(numsortkeys * sizeof(Oid));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.102 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/initsplan.c,v 1.103 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* tree.
*
* We also need to determine whether the qual is "valid everywhere",
- * which is true if the qual mentions no variables that are involved
- * in lower-level outer joins (this may be an overly strong test).
+ * which is true if the qual mentions no variables that are
+ * involved in lower-level outer joins (this may be an overly
+ * strong test).
*/
Relids addrelids = NULL;
Relids tmprelids;
{
/* delete it from local restrictinfo list */
rel1->baserestrictinfo = list_delete_ptr(rel1->baserestrictinfo,
- restrictinfo);
+ restrictinfo);
}
else
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.173 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/planner.c,v 1.174 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "utils/syscache.h"
-ParamListInfo PlannerBoundParamList = NULL; /* current boundParams */
+ParamListInfo PlannerBoundParamList = NULL; /* current boundParams */
/* Expression kind codes for preprocess_expression */
* eval_const_expressions tries to pre-evaluate an SQL function). So,
* these global state variables must be saved and restored.
*
- * Query level and the param list cannot be moved into the Query structure
- * since their whole purpose is communication across multiple sub-Queries.
- * Also, boundParams is explicitly info from outside the Query, and so
- * is likewise better handled as a global variable.
+ * Query level and the param list cannot be moved into the Query
+ * structure since their whole purpose is communication across
+ * multiple sub-Queries. Also, boundParams is explicitly info from
+ * outside the Query, and so is likewise better handled as a global
+ * variable.
*
* Note we do NOT save and restore PlannerPlanId: it exists to assign
* unique IDs to SubPlan nodes, and we want those IDs to be unique for
expr = flatten_join_alias_vars(parse, expr);
/*
- * If it's a qual or havingQual, canonicalize it. It seems most useful
- * to do this before applying eval_const_expressions, since the latter
- * can optimize flattened AND/ORs better than unflattened ones.
+ * If it's a qual or havingQual, canonicalize it. It seems most
+ * useful to do this before applying eval_const_expressions, since the
+ * latter can optimize flattened AND/ORs better than unflattened ones.
*
* Note: all processing of a qual expression after this point must be
* careful to maintain AND/OR flatness --- that is, do not generate a
/*
* If it's a qual or havingQual, convert it to implicit-AND format.
* (We don't want to do this before eval_const_expressions, since the
- * latter would be unable to simplify a top-level AND correctly. Also,
- * SS_process_sublinks expects explicit-AND format.)
+ * latter would be unable to simplify a top-level AND correctly.
+ * Also, SS_process_sublinks expects explicit-AND format.)
*/
if (kind == EXPRKIND_QUAL)
expr = (Node *) make_ands_implicit((Expr *) expr);
if (parse->setOperations)
{
- List *set_sortclauses;
+ List *set_sortclauses;
/*
* Construct the plan for set operations. The result will not
* the sort key information...
*/
current_pathkeys = make_pathkeys_for_sortclauses(set_sortclauses,
- result_plan->targetlist);
+ result_plan->targetlist);
current_pathkeys = canonicalize_pathkeys(parse, current_pathkeys);
/*
*
* Note: think not that we can turn off hasAggs if we find no aggs.
* It is possible for constant-expression simplification to remove
- * all explicit references to aggs, but we still have to follow the
- * aggregate semantics (eg, producing only one output row).
+ * all explicit references to aggs, but we still have to follow
+ * the aggregate semantics (eg, producing only one output row).
*/
if (parse->hasAggs)
numAggs = count_agg_clause((Node *) tlist) +
{
/*
* Use hashed grouping if (a) we think we can fit the
- * hashtable into work_mem, *and* (b) the estimated cost is
- * no more than doing it the other way. While avoiding
+ * hashtable into work_mem, *and* (b) the estimated cost
+ * is no more than doing it the other way. While avoiding
* the need for sorted input is usually a win, the fact
* that the output won't be sorted may be a loss; so we
* need to do an actual cost comparison.
foreach(gl, parse->groupClause)
{
- GroupClause *grpcl = (GroupClause *) lfirst(gl);
- Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
- TargetEntry *te = NULL;
- ListCell *sl;
+ GroupClause *grpcl = (GroupClause *) lfirst(gl);
+ Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
+ TargetEntry *te = NULL;
+ ListCell *sl;
/* Find or make a matching sub_tlist entry */
foreach(sl, sub_tlist)
foreach(gl, parse->groupClause)
{
- GroupClause *grpcl = (GroupClause *) lfirst(gl);
- Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
- TargetEntry *te = NULL;
- ListCell *sl;
+ GroupClause *grpcl = (GroupClause *) lfirst(gl);
+ Node *groupexpr = get_sortgroupclause_expr(grpcl, tlist);
+ TargetEntry *te = NULL;
+ ListCell *sl;
foreach(sl, sub_tlist)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.103 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/setrefs.c,v 1.104 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
set_plan_references(Plan *plan, List *rtable)
{
- ListCell *l;
+ ListCell *l;
if (plan == NULL)
return;
*/
break;
case T_Limit:
+
/*
- * Like the plan types above, Limit doesn't evaluate its
- * tlist or quals. It does have live expressions for
- * limit/offset, however.
+ * Like the plan types above, Limit doesn't evaluate its tlist
+ * or quals. It does have live expressions for limit/offset,
+ * however.
*/
fix_expr_references(plan, ((Limit *) plan)->limitOffset);
fix_expr_references(plan, ((Limit *) plan)->limitCount);
fix_expr_references(plan, ((Result *) plan)->resconstantqual);
break;
case T_Append:
+
/*
* Append, like Sort et al, doesn't actually evaluate its
- * targetlist or quals, and we haven't bothered to give it
- * its own tlist copy. So, don't fix targetlist/qual. But
- * do recurse into child plans.
+ * targetlist or quals, and we haven't bothered to give it its
+ * own tlist copy. So, don't fix targetlist/qual. But do
+ * recurse into child plans.
*/
foreach(l, ((Append *) plan)->appendplans)
set_plan_references((Plan *) lfirst(l), rtable);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.92 2004/08/29 04:12:33 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/plan/subselect.c,v 1.93 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* The correct field should get stored into the Param slot at
* execution in each part of the tree.
*
- * We also need to demand a match on vartypmod. This does not matter
- * for the Param itself, since those are not typmod-dependent, but it
- * does matter when make_subplan() instantiates a modified copy of the
- * Var for a subplan's args list.
+ * We also need to demand a match on vartypmod. This does not matter for
+ * the Param itself, since those are not typmod-dependent, but it does
+ * matter when make_subplan() instantiates a modified copy of the Var
+ * for a subplan's args list.
*/
i = 0;
foreach(ppl, PlannerParamList)
List **righthandIds)
{
List *result = NIL;
- ListCell *l, *lefthand_item, *tlist_item;
+ ListCell *l,
+ *lefthand_item,
+ *tlist_item;
*righthandIds = NIL;
lefthand_item = list_head(lefthand);
te->resdom->restype,
te->resdom->restypmod,
0);
+
/*
- * Copy it for caller. NB: we need a copy to avoid having
+ * Copy it for caller. NB: we need a copy to avoid having
* doubly-linked substructure in the modified parse tree.
*/
*righthandIds = lappend(*righthandIds, copyObject(rightop));
return false;
/*
- * The estimated size of the subquery result must fit in work_mem. (XXX
- * what about hashtable overhead?)
+ * The estimated size of the subquery result must fit in work_mem.
+ * (XXX what about hashtable overhead?)
*/
subquery_size = node->plan->plan_rows *
(MAXALIGN(node->plan->plan_width) + MAXALIGN(sizeof(HeapTupleData)));
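Plugging invented numbers into the estimate above (the row count, width, and tuple-header size are assumptions, and MAXALIGN is simplified to 8-byte rounding): 10,000 rows of width 32 plus an assumed 24-byte header come to 560,000 bytes, roughly 547 kB, which would fit in a 1 MB work_mem setting.

#include <stdio.h>

#define MY_MAXALIGN(LEN)   (((LEN) + 7) & ~7)   /* assumes 8-byte alignment */

int
main(void)
{
    /* Invented plan estimates and an assumed tuple-header size. */
    double  plan_rows = 10000.0;
    int     plan_width = 32;
    int     tuple_header = 24;      /* stand-in for sizeof(HeapTupleData) */
    double  subquery_size;

    subquery_size = plan_rows *
        (MY_MAXALIGN(plan_width) + MY_MAXALIGN(tuple_header));

    printf("estimated hashtable size: %.0f bytes (%.0f kB)\n",
           subquery_size, subquery_size / 1024.0);
    return 0;
}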
/*
* Build the result qual expressions. As a side effect,
- * ininfo->sub_targetlist is filled with a list of Vars
- * representing the subselect outputs.
+ * ininfo->sub_targetlist is filled with a list of Vars representing
+ * the subselect outputs.
*/
exprs = convert_sublink_opers(sublink->lefthand,
sublink->operOids,
/*
* Because make_subplan() could return an AND or OR clause, we have to
- * take steps to preserve AND/OR flatness of a qual. We assume the input
- * has been AND/OR flattened and so we need no recursion here.
+ * take steps to preserve AND/OR flatness of a qual. We assume the
+ * input has been AND/OR flattened and so we need no recursion here.
*
- * If we recurse down through anything other than an AND node,
- * we are definitely not at top qual level anymore. (Due to the coding
- * here, we will not get called on the List subnodes of an AND, so no
- * check is needed for List.)
+ * If we recurse down through anything other than an AND node, we are
+ * definitely not at top qual level anymore. (Due to the coding here,
+ * we will not get called on the List subnodes of an AND, so no check
+ * is needed for List.)
*/
if (and_clause(node))
{
- List *newargs = NIL;
- ListCell *l;
+ List *newargs = NIL;
+ ListCell *l;
/* Still at qual top-level */
locTopQual = *isTopQual;
foreach(l, ((BoolExpr *) node)->args)
{
- Node *newarg;
+ Node *newarg;
newarg = process_sublinks_mutator(lfirst(l),
(void *) &locTopQual);
if (or_clause(node))
{
- List *newargs = NIL;
- ListCell *l;
+ List *newargs = NIL;
+ ListCell *l;
foreach(l, ((BoolExpr *) node)->args)
{
- Node *newarg;
+ Node *newarg;
newarg = process_sublinks_mutator(lfirst(l),
(void *) &locTopQual);
case T_Append:
{
- ListCell *l;
+ ListCell *l;
foreach(l, ((Append *) plan)->appendplans)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.22 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepjointree.c,v 1.23 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool is_simple_subquery(Query *subquery);
static bool has_nullable_targetlist(Query *subquery);
static void resolvenew_in_jointree(Node *jtnode, int varno,
- List *rtable, List *subtlist);
+ List *rtable, List *subtlist);
static reduce_outer_joins_state *reduce_outer_joins_pass1(Node *jtnode);
static void reduce_outer_joins_pass2(Node *jtnode,
reduce_outer_joins_state *state,
* entries for upper Var references would do the wrong thing (the
* results wouldn't become NULL when they're supposed to).
*
- * XXX This could be improved by generating pseudo-variables for
- * such expressions; we'd have to figure out how to get the pseudo-
+ * XXX This could be improved by generating pseudo-variables for such
+ * expressions; we'd have to figure out how to get the pseudo-
* variables evaluated at the right place in the modified plan
* tree. Fix it someday.
*/
/*
* Need a modifiable copy of the subquery to hack on. Even if
* we didn't sometimes choose not to pull up below, we must do
- * this to avoid problems if the same subquery is referenced from
- * multiple jointree items (which can't happen normally, but might
- * after rule rewriting).
+ * this to avoid problems if the same subquery is referenced
+ * from multiple jointree items (which can't happen normally,
+ * but might after rule rewriting).
*/
subquery = copyObject(subquery);
/*
- * Pull up any IN clauses within the subquery's WHERE,
- * so that we don't leave unoptimized INs behind.
+ * Pull up any IN clauses within the subquery's WHERE, so that
+ * we don't leave unoptimized INs behind.
*/
if (subquery->hasSubLinks)
subquery->jointree->quals = pull_up_IN_clauses(subquery,
subquery->jointree->quals);
/*
- * Recursively pull up the subquery's subqueries, so that
- * this routine's processing is complete for its jointree and
+ * Recursively pull up the subquery's subqueries, so that this
+ * routine's processing is complete for its jointree and
* rangetable.
*
* Note: 'false' is correct here even if we are within an outer
* Give up, return unmodified RangeTblRef.
*
* Note: The work we just did will be redone when the
- * subquery gets planned on its own. Perhaps we could avoid
- * that by storing the modified subquery back into the
- * rangetable, but I'm not gonna risk it now.
+ * subquery gets planned on its own. Perhaps we could
+ * avoid that by storing the modified subquery back into
+ * the rangetable, but I'm not gonna risk it now.
*/
return jtnode;
}
/*
* Pull up any FOR UPDATE markers, too. (OffsetVarNodes
- * already adjusted the marker values, so just list_concat
- * the list.)
+ * already adjusted the marker values, so just list_concat the
+ * list.)
*/
parse->rowMarks = list_concat(parse->rowMarks, subquery->rowMarks);
* lists. NOTE: we put the pulled-up quals first.
*/
f->quals = (Node *) list_concat((List *) subf->quals,
- (List *) f->quals);
+ (List *) f->quals);
}
else
newlist = lappend(newlist, child);
f->fromlist = list_concat(f->fromlist,
subf->fromlist);
f->quals = (Node *) list_concat((List *) f->quals,
- (List *) subf->quals);
+ (List *) subf->quals);
}
else
f->fromlist = lappend(f->fromlist, j->rarg);
/* pulled-up quals first */
f->quals = (Node *) list_concat((List *) f->quals,
- (List *) j->quals);
+ (List *) j->quals);
return (Node *) f;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.45 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepqual.c,v 1.46 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Push down NOTs. We do this only in the top-level boolean
- * expression, without examining arguments of operators/functions.
- * The main reason for doing this is to expose as much top-level AND/OR
+ * expression, without examining arguments of operators/functions. The
+ * main reason for doing this is to expose as much top-level AND/OR
* structure as we can, so there's no point in descending further.
*/
newqual = find_nots(newqual);
* Note: we can destructively concat the subexpression's
* arglist because we know the recursive invocation of
* flatten_andors will have built a new arglist not shared
- * with any other expr. Otherwise we'd need a list_copy here.
+ * with any other expr. Otherwise we'd need a list_copy
+ * here.
*/
if (and_clause(subexpr))
out_list = list_concat(out_list,
* Note: we can destructively concat the subexpression's
* arglist because we know the recursive invocation of
* flatten_andors will have built a new arglist not shared
- * with any other expr. Otherwise we'd need a list_copy here.
+ * with any other expr. Otherwise we'd need a list_copy
+ * here.
*/
if (or_clause(subexpr))
out_list = list_concat(out_list,
*/
if (and_clause(subexpr))
out_list = list_concat(out_list,
- pull_ands(((BoolExpr *) subexpr)->args));
+ pull_ands(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
*/
if (or_clause(subexpr))
out_list = list_concat(out_list,
- pull_ors(((BoolExpr *) subexpr)->args));
+ pull_ors(((BoolExpr *) subexpr)->args));
else
out_list = lappend(out_list, subexpr);
}
* find_nots
* Traverse the qualification, looking for NOTs to take care of.
* For NOT clauses, apply push_nots() to try to push down the NOT.
- * For AND and OR clause types, simply recurse. Otherwise stop
+ * For AND and OR clause types, simply recurse. Otherwise stop
* recursing (we do not worry about structure below the top AND/OR tree).
*
* Returns the modified qualification. AND/OR flatness is preserved.
/*
* Negate an operator clause if possible: (NOT (< A B)) => (> A B)
- * Otherwise, retain the clause as it is (the NOT can't be pushed
- * down any farther).
+ * Otherwise, retain the clause as it is (the NOT can't be pushed down
+ * any farther).
*/
if (is_opclause(qual))
{
else if (not_clause((Node *) qual))
{
/*
- * Another NOT cancels this NOT, so eliminate the NOT and
- * stop negating this branch.
+ * Another NOT cancels this NOT, so eliminate the NOT and stop
+ * negating this branch.
*/
return get_notclausearg(qual);
}
else
{
/*
- * We don't know how to negate anything else, place a NOT at
- * this level.
+ * We don't know how to negate anything else, place a NOT at this
+ * level.
*/
return make_notclause(qual);
}
*
* This may seem like a fairly useless activity, but it turns out to be
* applicable to many machine-generated queries, and there are also queries
- * in some of the TPC benchmarks that need it. This was in fact almost the
+ * in some of the TPC benchmarks that need it. This was in fact almost the
* sole useful side-effect of the old prepqual code that tried to force
* the query into canonical AND-of-ORs form: the canonical equivalent of
* ((A AND B) OR (A AND C))
* OR clauses to which the inverse OR distributive law might apply.
* Only the top-level AND/OR structure is searched.
*
- * Returns the modified qualification. AND/OR flatness is preserved.
+ * Returns the modified qualification. AND/OR flatness is preserved.
*/
static Expr *
find_duplicate_ors(Expr *qual)
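find_duplicate_ors applies the inverse OR distributive law discussed above; a brute-force truth-table check of that equivalence (plain C, independent of the planner's Expr representation) confirms it:

#include <stdio.h>
#include <stdbool.h>

int
main(void)
{
    int     a, b, c;
    int     mismatches = 0;

    /* Verify (A AND B) OR (A AND C)  ==  A AND (B OR C) for all inputs. */
    for (a = 0; a <= 1; a++)
        for (b = 0; b <= 1; b++)
            for (c = 0; c <= 1; c++)
            {
                bool    lhs = (a && b) || (a && c);
                bool    rhs = a && (b || c);

                if (lhs != rhs)
                    mismatches++;
            }

    printf("mismatches: %d\n", mismatches);     /* prints 0 */
    return 0;
}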
/* Recurse */
foreach(temp, ((BoolExpr *) qual)->args)
orlist = lappend(orlist, find_duplicate_ors(lfirst(temp)));
+
/*
* Don't need pull_ors() since this routine will never introduce
* an OR where there wasn't one before.
if (orlist == NIL)
return NULL; /* probably can't happen */
- if (list_length(orlist) == 1) /* single-expression OR (can this happen?) */
+ if (list_length(orlist) == 1) /* single-expression OR (can this
+ * happen?) */
return linitial(orlist);
/*
* Choose the shortest AND clause as the reference list --- obviously,
- * any subclause not in this clause isn't in all the clauses.
- * If we find a clause that's not an AND, we can treat it as a
- * one-element AND clause, which necessarily wins as shortest.
+ * any subclause not in this clause isn't in all the clauses. If we
+ * find a clause that's not an AND, we can treat it as a one-element
+ * AND clause, which necessarily wins as shortest.
*/
foreach(temp, orlist)
{
/*
* Check each element of the reference list to see if it's in all the
- * OR clauses. Build a new list of winning clauses.
+ * OR clauses. Build a new list of winning clauses.
*/
winners = NIL;
foreach(temp, reference)
/*
* Generate new OR list consisting of the remaining sub-clauses.
*
- * If any clause degenerates to empty, then we have a situation like
- * (A AND B) OR (A), which can be reduced to just A --- that is, the
+ * If any clause degenerates to empty, then we have a situation like (A
+ * AND B) OR (A), which can be reduced to just A --- that is, the
* additional conditions in other arms of the OR are irrelevant.
*
* Note that because we use list_difference, any multiple occurrences of
- * a winning clause in an AND sub-clause will be removed automatically.
+ * a winning clause in an AND sub-clause will be removed
+ * automatically.
*/
neworlist = NIL;
foreach(temp, orlist)
}
else
{
- neworlist = NIL; /* degenerate case, see above */
+ neworlist = NIL; /* degenerate case, see above */
break;
}
}
neworlist = lappend(neworlist, clause);
else
{
- neworlist = NIL; /* degenerate case, see above */
+ neworlist = NIL; /* degenerate case, see above */
break;
}
}
}
/*
- * Append reduced OR to the winners list, if it's not degenerate, handling
- * the special case of one element correctly (can that really happen?).
- * Also be careful to maintain AND/OR flatness in case we pulled up a
- * sub-sub-OR-clause.
+ * Append reduced OR to the winners list, if it's not degenerate,
+ * handling the special case of one element correctly (can that really
+ * happen?). Also be careful to maintain AND/OR flatness in case we
+ * pulled up a sub-sub-OR-clause.
*/
if (neworlist != NIL)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.115 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/prep/prepunion.c,v 1.116 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
adjust_inherited_attrs_context *context);
static Relids adjust_relid_set(Relids relids, Index oldrelid, Index newrelid);
static List *adjust_inherited_tlist(List *tlist,
- adjust_inherited_attrs_context *context);
+ adjust_inherited_attrs_context *context);
/*
SetOperationStmt *top_union,
List *refnames_tlist)
{
- List *child_sortclauses;
+ List *child_sortclauses;
if (IsA(setOp, SetOperationStmt))
{
if (dup_parent)
inhRTIs = NIL;
else
- inhRTIs = list_make1_int(rti); /* include original RTE in result */
+ inhRTIs = list_make1_int(rti); /* include original RTE in result */
foreach(l, inhOIDs)
{
}
/*
- * We assume that by now the planner has acquired at least AccessShareLock
- * on both rels, and so we need no additional lock now.
+ * We assume that by now the planner has acquired at least
+ * AccessShareLock on both rels, and so we need no additional lock
+ * now.
*/
oldrelation = heap_open(old_relid, NoLock);
newrelation = heap_open(new_relid, NoLock);
generate_whole_row(Var *var,
adjust_inherited_attrs_context *context)
{
- RowExpr *rowexpr;
- List *fields = NIL;
+ RowExpr *rowexpr;
+ List *fields = NIL;
int oldnatts = context->old_tupdesc->natts;
int i;
for (i = 0; i < oldnatts; i++)
{
Form_pg_attribute att = context->old_tupdesc->attrs[i];
- Var *newvar;
+ Var *newvar;
if (att->attisdropped)
{
/*
- * can't use atttypid here, but it doesn't really matter
- * what type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter what
+ * type the Const claims to be.
*/
newvar = (Var *) makeNullConst(INT4OID);
}
}
rowexpr = makeNode(RowExpr);
rowexpr->args = fields;
- rowexpr->row_typeid = var->vartype; /* report parent's rowtype */
+ rowexpr->row_typeid = var->vartype; /* report parent's rowtype */
rowexpr->row_format = COERCE_IMPLICIT_CAST;
return (Node *) rowexpr;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.179 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/clauses.c,v 1.180 2004/08/29 05:06:44 momjian Exp $
*
* HISTORY
* AUTHOR DATE MAJOR EVENT
static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool set_coercionform_dontcare_walker(Node *node, void *context);
static Node *eval_const_expressions_mutator(Node *node,
- eval_const_expressions_context *context);
+ eval_const_expressions_context *context);
static List *simplify_or_arguments(List *args,
- bool *haveNull, bool *forceTrue);
+ bool *haveNull, bool *forceTrue);
static List *simplify_and_arguments(List *args,
- bool *haveNull, bool *forceFalse);
+ bool *haveNull, bool *forceFalse);
static Expr *simplify_function(Oid funcid, Oid result_type, List *args,
- bool allow_inline,
- eval_const_expressions_context *context);
+ bool allow_inline,
+ eval_const_expressions_context *context);
static Expr *evaluate_function(Oid funcid, Oid result_type, List *args,
HeapTuple func_tuple);
static Expr *inline_function(Oid funcid, Oid result_type, List *args,
- HeapTuple func_tuple,
- eval_const_expressions_context *context);
+ HeapTuple func_tuple,
+ eval_const_expressions_context *context);
static Node *substitute_actual_parameters(Node *expr, int nargs, List *args,
int *usecounts);
static Node *substitute_actual_parameters_mutator(Node *node,
* The idea here is that the caller has verified that the expression contains
* one or more Var or Param nodes (as appropriate for the caller's need), and
* now wishes to prove that the expression result will be NULL if any of these
- * inputs is NULL. If we return false, then the proof succeeded.
+ * inputs is NULL. If we return false, then the proof succeeded.
*/
bool
contain_nonstrict_functions(Node *clause)
if (paramInfo)
{
/*
- * Found it, so return a Const representing the param value.
- * Note that we don't copy pass-by-ref datatypes, so the
- * Const will only be valid as long as the bound parameter
- * list exists. This is okay for intended uses of
- * estimate_expression_value().
+ * Found it, so return a Const representing the param
+ * value. Note that we don't copy pass-by-ref datatypes,
+ * so the Const will only be valid as long as the bound
+ * parameter list exists. This is okay for intended uses
+ * of estimate_expression_value().
*/
int16 typLen;
bool typByVal;
bool forceTrue = false;
newargs = simplify_or_arguments(args,
- &haveNull, &forceTrue);
+ &haveNull, &forceTrue);
if (forceTrue)
return makeBoolConst(true, false);
if (haveNull)
bool forceFalse = false;
newargs = simplify_and_arguments(args,
- &haveNull, &forceFalse);
+ &haveNull, &forceFalse);
if (forceFalse)
return makeBoolConst(false, false);
if (haveNull)
Assert(list_length(args) == 1);
if (IsA(linitial(args), Const))
{
- Const *const_input = (Const *) linitial(args);
+ Const *const_input = (Const *) linitial(args);
/* NOT NULL => NULL */
if (const_input->constisnull)
* it can arise while simplifying functions.) Also, we can
* optimize field selection from a RowExpr construct.
*
- * We must however check that the declared type of the field is
- * still the same as when the FieldSelect was created --- this
- * can change if someone did ALTER COLUMN TYPE on the rowtype.
+ * We must however check that the declared type of the field is still
+ * the same as when the FieldSelect was created --- this can
+ * change if someone did ALTER COLUMN TYPE on the rowtype.
*/
FieldSelect *fselect = (FieldSelect *) node;
FieldSelect *newfselect;
}
if (arg && IsA(arg, RowExpr))
{
- RowExpr *rowexpr = (RowExpr *) arg;
+ RowExpr *rowexpr = (RowExpr *) arg;
if (fselect->fieldnum > 0 &&
fselect->fieldnum <= list_length(rowexpr->args))
{
- Node *fld = (Node *) list_nth(rowexpr->args,
- fselect->fieldnum - 1);
+ Node *fld = (Node *) list_nth(rowexpr->args,
+ fselect->fieldnum - 1);
if (rowtype_field_matches(rowexpr->row_typeid,
fselect->fieldnum,
foreach(larg, args)
{
- Node *arg = (Node *) lfirst(larg);
+ Node *arg = (Node *) lfirst(larg);
if (IsA(arg, Const))
{
- Const *const_input = (Const *) arg;
+ Const *const_input = (Const *) arg;
if (const_input->constisnull)
*haveNull = true;
else if (DatumGetBool(const_input->constvalue))
{
*forceTrue = true;
+
/*
* Once we detect a TRUE result we can just exit the loop
* immediately. However, if we ever add a notion of
else if (or_clause(arg))
{
newargs = list_concat(newargs,
- simplify_or_arguments(((BoolExpr *) arg)->args,
- haveNull, forceTrue));
+ simplify_or_arguments(((BoolExpr *) arg)->args,
+ haveNull, forceTrue));
}
else
- {
newargs = lappend(newargs, arg);
- }
}
return newargs;
foreach(larg, args)
{
- Node *arg = (Node *) lfirst(larg);
+ Node *arg = (Node *) lfirst(larg);
if (IsA(arg, Const))
{
- Const *const_input = (Const *) arg;
+ Const *const_input = (Const *) arg;
if (const_input->constisnull)
*haveNull = true;
else if (!DatumGetBool(const_input->constvalue))
{
*forceFalse = true;
+
/*
* Once we detect a FALSE result we can just exit the loop
* immediately. However, if we ever add a notion of
else if (and_clause(arg))
{
newargs = list_concat(newargs,
- simplify_and_arguments(((BoolExpr *) arg)->args,
- haveNull, forceFalse));
+ simplify_and_arguments(((BoolExpr *) arg)->args,
+ haveNull, forceFalse));
}
else
- {
newargs = lappend(newargs, arg);
- }
}
return newargs;
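
(Illustration, not part of the patch: a minimal standalone sketch of the flattening logic in simplify_or_arguments()/simplify_and_arguments() above, using a toy node type instead of PostgreSQL's List/Const machinery. Constant FALSE inputs are dropped, constant NULL inputs are dropped but remembered, nested ORs are pulled up recursively, and a constant TRUE short-circuits the whole expression; the AND case is the dual with TRUE and FALSE swapped. In the caller above, a forced TRUE result replaces the entire clause with a TRUE constant.)

#include <stdbool.h>
#include <stdio.h>

typedef enum { C_TRUE, C_FALSE, C_NULL, VAR, OR_EXPR_KIND } Kind;

typedef struct ToyNode ToyNode;
struct ToyNode
{
	Kind		kind;
	const char *name;			/* used when kind == VAR */
	const ToyNode **args;		/* used when kind == OR_EXPR_KIND */
	int			nargs;
};

/*
 * Collect the surviving arguments of an OR into kept[]; return true if a
 * constant TRUE forces the whole OR to TRUE.  *have_null records whether a
 * constant NULL was seen (it only matters if no TRUE is found).
 */
static bool
toy_simplify_or(const ToyNode *expr, const char **kept, int *nkept, bool *have_null)
{
	for (int i = 0; i < expr->nargs; i++)
	{
		const ToyNode *arg = expr->args[i];

		if (arg->kind == C_TRUE)
			return true;				/* TRUE OR x  =>  TRUE */
		else if (arg->kind == C_FALSE)
			continue;					/* FALSE OR x  =>  x */
		else if (arg->kind == C_NULL)
			*have_null = true;
		else if (arg->kind == OR_EXPR_KIND)
		{
			/* pull the sub-OR's arguments up into this level */
			if (toy_simplify_or(arg, kept, nkept, have_null))
				return true;
		}
		else
			kept[(*nkept)++] = arg->name;	/* ordinary argument survives */
	}
	return false;
}

int
main(void)
{
	/* models  x OR (FALSE OR NULL) OR y */
	ToyNode		x = {VAR, "x", NULL, 0};
	ToyNode		y = {VAR, "y", NULL, 0};
	ToyNode		f = {C_FALSE, NULL, NULL, 0};
	ToyNode		n = {C_NULL, NULL, NULL, 0};
	const ToyNode *inner_args[] = {&f, &n};
	ToyNode		inner = {OR_EXPR_KIND, NULL, inner_args, 2};
	const ToyNode *outer_args[] = {&x, &inner, &y};
	ToyNode		outer = {OR_EXPR_KIND, NULL, outer_args, 3};
	const char *kept[8];
	int			nkept = 0;
	bool		have_null = false;

	if (toy_simplify_or(&outer, kept, &nkept, &have_null))
		puts("constant TRUE");
	else
	{
		for (int i = 0; i < nkept; i++)
			printf("kept: %s\n", kept[i]);	/* prints x then y */
		if (have_null)
			puts("saw a NULL constant");	/* printed for this example */
	}
	return 0;
}
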
static void
sql_inline_error_callback(void *arg)
{
- HeapTuple func_tuple = (HeapTuple) arg;
+ HeapTuple func_tuple = (HeapTuple) arg;
Form_pg_proc funcform = (Form_pg_proc) GETSTRUCT(func_tuple);
int syntaxerrposition;
return walker(((FieldSelect *) node)->arg, context);
case T_FieldStore:
{
- FieldStore *fstore = (FieldStore *) node;
+ FieldStore *fstore = (FieldStore *) node;
if (walker(fstore->arg, context))
return true;
break;
case T_RowExpr:
{
- RowExpr *rowexpr = (RowExpr *) node;
- RowExpr *newnode;
+ RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *newnode;
FLATCOPY(newnode, rowexpr, RowExpr);
MUTATE(newnode->args, rowexpr->args, List *);
break;
case RTE_JOIN:
if (!(flags & QTW_IGNORE_JOINALIASES))
- {
MUTATE(newrte->joinaliasvars, rte->joinaliasvars, List *);
- }
break;
case RTE_FUNCTION:
MUTATE(newrte->funcexpr, rte->funcexpr, Node *);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.109 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/pathnode.c,v 1.110 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Cost fuzz;
/*
- * The fuzz factor is set at one percent of the smaller total_cost, but
- * not less than 0.01 cost units (just in case total cost is zero).
+ * The fuzz factor is set at one percent of the smaller total_cost,
+ * but not less than 0.01 cost units (just in case total cost is
+ * zero).
*
* XXX does this percentage need to be user-configurable?
*/
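
(Illustration, not part of the patch and not the actual compare_fuzzy_path_costs() code: the fuzz rule described in the comment above, one percent of the smaller total cost with a floor of 0.01, can be sketched as follows. In add_path() the fuzzy result decides whether two paths are worth distinguishing at all; only on a fuzzy tie does the code go on to compare pathkeys and, if necessary, exact costs, as in the hunks below.)

#include <stdio.h>

typedef double Cost;			/* stands in for the planner's Cost type */

/* +1: a is clearly costlier, -1: b is clearly costlier, 0: not significantly different */
static int
fuzzy_cost_cmp(Cost a, Cost b)
{
	Cost		smaller = (a < b) ? a : b;
	Cost		fuzz = smaller * 0.01;	/* one percent of the smaller cost */

	if (fuzz < 0.01)
		fuzz = 0.01;			/* floor, in case a total cost is zero */
	if (a > b + fuzz)
		return +1;
	if (b > a + fuzz)
		return -1;
	return 0;
}

int
main(void)
{
	printf("%d %d %d\n",
		   fuzzy_cost_cmp(100.0, 100.5),	/* 0: within one percent */
		   fuzzy_cost_cmp(100.0, 102.0),	/* -1: b clearly costlier */
		   fuzzy_cost_cmp(0.0, 0.005));		/* 0: floor of 0.01 applies */
	return 0;
}
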
* possible for more than one old path to be tossed out because
* new_path dominates it.
*/
- p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
+ p1 = list_head(parent_rel->pathlist); /* cannot use foreach here */
while (p1 != NULL)
{
Path *old_path = (Path *) lfirst(p1);
int costcmp;
/*
- * As of Postgres 8.0, we use fuzzy cost comparison to avoid wasting
- * cycles keeping paths that are really not significantly different
- * in cost.
+ * As of Postgres 8.0, we use fuzzy cost comparison to avoid
+ * wasting cycles keeping paths that are really not significantly
+ * different in cost.
*/
costcmp = compare_fuzzy_path_costs(new_path, old_path, TOTAL_COST);
* slower) comparison of pathkeys. If they compare the same,
* proceed with the pathkeys comparison. Note: this test relies
* on the fact that compare_fuzzy_path_costs will only return 0 if
- * both costs are effectively equal (and, therefore, there's no need
- * to call it twice in that case).
+ * both costs are effectively equal (and, therefore, there's no
+ * need to call it twice in that case).
*/
if (costcmp == 0 ||
costcmp == compare_fuzzy_path_costs(new_path, old_path,
*/
if (compare_path_costs(new_path, old_path,
TOTAL_COST) < 0)
- remove_old = true; /* new dominates old */
+ remove_old = true; /* new dominates old */
else
- accept_new = false; /* old equals or dominates
+ accept_new = false; /* old equals or dominates
* new */
}
break;
{
Path *subpath = (Path *) lfirst(l);
- if (l == list_head(subpaths)) /* first node? */
+ if (l == list_head(subpaths)) /* first node? */
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost += subpath->total_cost;
}
pathnode->subpath = subpath;
/*
- * If the input is a subquery whose output must be unique already,
- * we don't need to do anything.
+ * If the input is a subquery whose output must be unique already, we
+ * don't need to do anything.
*/
if (rel->rtekind == RTE_SUBQUERY)
{
/*
* GROUP BY guarantees uniqueness if all the grouped columns appear in
- * the output. In our implementation this means checking they are non
+ * the output. In our implementation this means checking they are non
* resjunk columns.
*/
if (query->groupClause)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.95 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/plancat.c,v 1.96 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
if (info->indpred && varno != 1)
ChangeVarNodes((Node *) info->indpred, 1, varno, 0);
- info->predOK = false; /* set later in indxpath.c */
+ info->predOK = false; /* set later in indxpath.c */
info->unique = index->indisunique;
/* initialize cached join info to empty */
}
tlist = lappend(tlist,
- create_tl_element(makeVar(varno,
- attrno,
- att_tup->atttypid,
- att_tup->atttypmod,
- 0),
- attrno));
+ create_tl_element(makeVar(varno,
+ attrno,
+ att_tup->atttypid,
+ att_tup->atttypmod,
+ 0),
+ attrno));
}
heap_close(relation, AccessShareLock);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.61 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/relnode.c,v 1.62 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Collect all the clauses that syntactically belong at this level.
*/
rlist = list_concat(subbuild_joinrel_restrictlist(joinrel,
- outer_rel->joininfo),
+ outer_rel->joininfo),
subbuild_joinrel_restrictlist(joinrel,
- inner_rel->joininfo));
+ inner_rel->joininfo));
/*
* Eliminate duplicate and redundant clauses.
* but we can use a shallow copy.
*/
restrictlist = list_concat(restrictlist,
- list_copy(joininfo->jinfo_restrictinfo));
+ list_copy(joininfo->jinfo_restrictinfo));
}
else
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.29 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/restrictinfo.c,v 1.30 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static RestrictInfo *make_restrictinfo_internal(Expr *clause,
- Expr *orclause,
- bool is_pushed_down,
- bool valid_everywhere);
+ Expr *orclause,
+ bool is_pushed_down,
+ bool valid_everywhere);
static Expr *make_sub_restrictinfos(Expr *clause,
- bool is_pushed_down,
- bool valid_everywhere);
+ bool is_pushed_down,
+ bool valid_everywhere);
static RestrictInfo *join_clause_is_redundant(Query *root,
RestrictInfo *rinfo,
List *reference_list,
/* Else we need an OR RestrictInfo structure */
foreach(orlist, indexclauses)
{
- List *andlist = (List *) lfirst(orlist);
+ List *andlist = (List *) lfirst(orlist);
/* Create AND subclause with RestrictInfos */
withris = lappend(withris, make_ands_explicit(andlist));
withoutris = lappend(withoutris, make_ands_explicit(andlist));
}
return list_make1(make_restrictinfo_internal(make_orclause(withoutris),
- make_orclause(withris),
- is_pushed_down,
- valid_everywhere));
+ make_orclause(withris),
+ is_pushed_down,
+ valid_everywhere));
}
/*
restrictinfo->can_join = false; /* may get set below */
/*
- * If it's a binary opclause, set up left/right relids info.
- * In any case set up the total clause relids info.
+ * If it's a binary opclause, set up left/right relids info. In any
+ * case set up the total clause relids info.
*/
if (is_opclause(clause) && list_length(((OpExpr *) clause)->args) == 2)
{
restrictinfo->right_relids = pull_varnos(get_rightop(clause));
restrictinfo->clause_relids = bms_union(restrictinfo->left_relids,
- restrictinfo->right_relids);
+ restrictinfo->right_relids);
/*
* Does it look like a normal join clause, i.e., a binary operator
* relating expressions that come from distinct relations? If so
- * we might be able to use it in a join algorithm. Note that this
+ * we might be able to use it in a join algorithm. Note that this
* is a purely syntactic test that is made regardless of context.
*/
if (!bms_is_empty(restrictinfo->left_relids) &&
}
/*
- * Fill in all the cacheable fields with "not yet set" markers.
- * None of these will be computed until/unless needed. Note in
- * particular that we don't mark a binary opclause as mergejoinable
- * or hashjoinable here; that happens only if it appears in the right
+ * Fill in all the cacheable fields with "not yet set" markers. None
+ * of these will be computed until/unless needed. Note in particular
+ * that we don't mark a binary opclause as mergejoinable or
+ * hashjoinable here; that happens only if it appears in the right
* context (top level of a joinclause list).
*/
restrictinfo->eval_cost.startup = -1;
/*
* If there are any redundant clauses, we want to eliminate the ones
- * that are more expensive in favor of the ones that are less so.
- * Run cost_qual_eval() to ensure the eval_cost fields are set up.
+ * that are more expensive in favor of the ones that are less so. Run
+ * cost_qual_eval() to ensure the eval_cost fields are set up.
*/
cost_qual_eval(&cost, restrictinfo_list);
/*
- * We don't have enough knowledge yet to be able to estimate the number
- * of times a clause might be evaluated, so it's hard to weight the
- * startup and per-tuple costs appropriately. For now just weight 'em
- * the same.
+ * We don't have enough knowledge yet to be able to estimate the
+ * number of times a clause might be evaluated, so it's hard to weight
+ * the startup and per-tuple costs appropriately. For now just weight
+ * 'em the same.
*/
#define CLAUSECOST(r) ((r)->eval_cost.startup + (r)->eval_cost.per_tuple)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.66 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/tlist.c,v 1.67 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
TargetEntry *
create_tl_element(Var *var, int resdomno)
{
- Oid vartype;
- int32 vartypmod;
+ Oid vartype;
+ int32 vartypmod;
if (IsA(var, Var))
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.61 2004/08/29 04:12:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/optimizer/util/var.c,v 1.62 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* flatten_join_alias_vars
* Replace Vars that reference JOIN outputs with references to the original
* relation variables instead. This allows quals involving such vars to be
- * pushed down. Whole-row Vars that reference JOIN relations are expanded
+ * pushed down. Whole-row Vars that reference JOIN relations are expanded
* into RowExpr constructs that name the individual output Vars. This
* is necessary since we will not scan the JOIN as a base relation, which
* is the only way that the executor can directly handle whole-row Vars.
if (var->varattno == InvalidAttrNumber)
{
/* Must expand whole-row reference */
- RowExpr *rowexpr;
- List *fields = NIL;
+ RowExpr *rowexpr;
+ List *fields = NIL;
AttrNumber attnum;
- ListCell *l;
+ ListCell *l;
attnum = 0;
foreach(l, rte->joinaliasvars)
var->varno,
attnum))
continue;
+
/*
* If we are expanding an alias carried down from an upper
* query, must adjust its varlevelsup fields.
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.310 2004/08/29 04:12:35 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/analyze.c,v 1.311 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Query *transformStmt(ParseState *pstate, Node *stmt,
List **extras_before, List **extras_after);
static Query *transformViewStmt(ParseState *pstate, ViewStmt *stmt,
- List **extras_before, List **extras_after);
+ List **extras_before, List **extras_after);
static Query *transformDeleteStmt(ParseState *pstate, DeleteStmt *stmt);
static Query *transformInsertStmt(ParseState *pstate, InsertStmt *stmt,
List **extras_before, List **extras_after);
transformViewStmt(ParseState *pstate, ViewStmt *stmt,
List **extras_before, List **extras_after)
{
- Query *result = makeNode(Query);
+ Query *result = makeNode(Query);
result->commandType = CMD_UTILITY;
result->utilityStmt = (Node *) stmt;
extras_before, extras_after);
/*
- * If a list of column names was given, run through and insert
- * these into the actual query tree. - thomas 2000-03-08
+ * If a list of column names was given, run through and insert these
+ * into the actual query tree. - thomas 2000-03-08
*
* Outer loop is over targetlist to make it easier to skip junk
* targetlist entries.
*/
if (stmt->aliases != NIL)
{
- ListCell *alist_item = list_head(stmt->aliases);
- ListCell *targetList;
+ ListCell *alist_item = list_head(stmt->aliases);
+ ListCell *targetList;
foreach(targetList, stmt->query->targetList)
{
rd->resname = pstrdup(strVal(lfirst(alist_item)));
alist_item = lnext(alist_item);
if (alist_item == NULL)
- break; /* done assigning aliases */
+ break; /* done assigning aliases */
}
if (alist_item != NULL)
/*
* Determine namespace and name to use for the sequence.
*
- * Although we use ChooseRelationName, it's not guaranteed that
- * the selected sequence name won't conflict; given sufficiently
- * long field names, two different serial columns in the same table
+ * Although we use ChooseRelationName, it's not guaranteed that the
+ * selected sequence name won't conflict; given sufficiently long
+ * field names, two different serial columns in the same table
* could be assigned the same sequence name, and we'd not notice
* since we aren't creating the sequence quite yet. In practice
- * this seems quite unlikely to be a problem, especially since
- * few people would need two serial columns in one table.
+ * this seems quite unlikely to be a problem, especially since few
+ * people would need two serial columns in one table.
*/
snamespaceid = RangeVarGetCreationNamespace(cxt->relation);
snamespace = get_namespace_name(snamespaceid);
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = FALSE;
saw_nullable = true;
break;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("conflicting NULL/NOT NULL declarations for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->is_not_null = TRUE;
saw_nullable = true;
break;
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("multiple default values specified for column \"%s\" of table \"%s\"",
- column->colname, cxt->relation->relname)));
+ column->colname, cxt->relation->relname)));
column->raw_default = constraint->raw_expr;
Assert(constraint->cooked_expr == NULL);
break;
errmsg("multiple primary keys for table \"%s\" are not allowed",
cxt->relation->relname)));
cxt->pkey = index;
+
/*
* In ALTER TABLE case, a primary index might already exist,
* but DefineIndex will check for it.
if (equal(index->indexParams, priorindex->indexParams))
{
/*
- * If the prior index is as yet unnamed, and this one
- * is named, then transfer the name to the prior
- * index. This ensures that if we have named and
- * unnamed constraints, we'll use (at least one of)
- * the names for the index.
+ * If the prior index is as yet unnamed, and this one is
+ * named, then transfer the name to the prior index. This
+ * ensures that if we have named and unnamed constraints,
+ * we'll use (at least one of) the names for the index.
*/
if (priorindex->idxname == NULL)
priorindex->idxname = index->idxname;
/*
* For CREATE TABLE or ALTER TABLE ADD COLUMN, gin up an ALTER TABLE
- * ADD CONSTRAINT command to execute after the basic command is complete.
- * (If called from ADD CONSTRAINT, that routine will add the FK constraints
- * to its own subcommand list.)
+ * ADD CONSTRAINT command to execute after the basic command is
+ * complete. (If called from ADD CONSTRAINT, that routine will add the
+ * FK constraints to its own subcommand list.)
*
* Note: the ADD CONSTRAINT command must also execute after any index
* creation commands. Thus, this should run after
foreach(fkclist, cxt->fkconstraints)
{
FkConstraint *fkconstraint = (FkConstraint *) lfirst(fkclist);
- AlterTableCmd *altercmd = makeNode(AlterTableCmd);
+ AlterTableCmd *altercmd = makeNode(AlterTableCmd);
altercmd->subtype = AT_ProcessedConstraint;
altercmd->name = NULL;
stmt->whereClause = transformWhereClause(pstate, stmt->whereClause,
"WHERE");
- if (list_length(pstate->p_rtable) != 2) /* naughty, naughty... */
+ if (list_length(pstate->p_rtable) != 2) /* naughty, naughty... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
errmsg("rule WHERE condition may not contain references to other relations")));
static void
applyColumnNames(List *dst, List *src)
{
- ListCell *dst_item = list_head(dst);
- ListCell *src_item = list_head(src);
+ ListCell *dst_item = list_head(dst);
+ ListCell *src_item = list_head(src);
if (list_length(src) > list_length(dst))
ereport(ERROR,
{
/*
* Resjunk nodes need no additional processing, but be sure
- * they have resnos that do not match any target columns;
- * else rewriter or planner might get confused. They don't
- * need a resname either.
+ * they have resnos that do not match any target columns; else
+ * rewriter or planner might get confused. They don't need a
+ * resname either.
*/
resnode->resno = (AttrNumber) pstate->p_next_resno++;
resnode->resname = NULL;
*l;
List *newcmds = NIL;
bool skipValidation = true;
- AlterTableCmd *newcmd;
+ AlterTableCmd *newcmd;
cxt.stmtType = "ALTER TABLE";
cxt.relation = stmt->relation;
/*
* The only subtypes that currently require parse transformation
- * handling are ADD COLUMN and ADD CONSTRAINT. These largely
- * re-use code from CREATE TABLE.
+ * handling are ADD COLUMN and ADD CONSTRAINT. These largely re-use
+ * code from CREATE TABLE.
*/
foreach(lcmd, stmt->cmds)
{
- AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
+ AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd);
switch (cmd->subtype)
{
case AT_AddColumn:
- {
- ColumnDef *def = (ColumnDef *) cmd->def;
+ {
+ ColumnDef *def = (ColumnDef *) cmd->def;
- Assert(IsA(cmd->def, ColumnDef));
- transformColumnDefinition(pstate, &cxt,
- (ColumnDef *) cmd->def);
+ Assert(IsA(cmd->def, ColumnDef));
+ transformColumnDefinition(pstate, &cxt,
+ (ColumnDef *) cmd->def);
- /*
- * If the column has a non-null default, we can't skip
- * validation of foreign keys.
- */
- if (((ColumnDef *) cmd->def)->raw_default != NULL)
- skipValidation = false;
+ /*
+ * If the column has a non-null default, we can't skip
+ * validation of foreign keys.
+ */
+ if (((ColumnDef *) cmd->def)->raw_default != NULL)
+ skipValidation = false;
- newcmds = lappend(newcmds, cmd);
+ newcmds = lappend(newcmds, cmd);
- /*
- * Convert an ADD COLUMN ... NOT NULL constraint to a separate
- * command
- */
- if (def->is_not_null)
- {
- /* Remove NOT NULL from AddColumn */
- def->is_not_null = false;
-
- /* Add as a separate AlterTableCmd */
- newcmd = makeNode(AlterTableCmd);
- newcmd->subtype = AT_SetNotNull;
- newcmd->name = pstrdup(def->colname);
- newcmds = lappend(newcmds, newcmd);
+ /*
+ * Convert an ADD COLUMN ... NOT NULL constraint to a
+ * separate command
+ */
+ if (def->is_not_null)
+ {
+ /* Remove NOT NULL from AddColumn */
+ def->is_not_null = false;
+
+ /* Add as a separate AlterTableCmd */
+ newcmd = makeNode(AlterTableCmd);
+ newcmd->subtype = AT_SetNotNull;
+ newcmd->name = pstrdup(def->colname);
+ newcmds = lappend(newcmds, newcmd);
+ }
+
+ /*
+ * All constraints are processed in other ways. Remove
+ * the original list
+ */
+ def->constraints = NIL;
+
+ break;
}
+ case AT_AddConstraint:
/*
- * All constraints are processed in other ways.
- * Remove the original list
+ * The original AddConstraint cmd node doesn't go to
+ * newcmds
*/
- def->constraints = NIL;
-
- break;
- }
- case AT_AddConstraint:
- /* The original AddConstraint cmd node doesn't go to newcmds */
if (IsA(cmd->def, Constraint))
transformTableConstraint(pstate, &cxt,
case AT_ProcessedConstraint:
/*
- * Already-transformed ADD CONSTRAINT, so just make it look
- * like the standard case.
+ * Already-transformed ADD CONSTRAINT, so just make it
+ * look like the standard case.
*/
cmd->subtype = AT_AddConstraint;
newcmds = lappend(newcmds, cmd);
transformFKConstraints(pstate, &cxt, skipValidation, true);
/*
- * Push any index-creation commands into the ALTER, so that
- * they can be scheduled nicely by tablecmds.c.
+ * Push any index-creation commands into the ALTER, so that they can
+ * be scheduled nicely by tablecmds.c.
*/
foreach(l, cxt.alist)
{
- Node *idxstmt = (Node *) lfirst(l);
+ Node *idxstmt = (Node *) lfirst(l);
Assert(IsA(idxstmt, IndexStmt));
newcmd = makeNode(AlterTableCmd);
{
int nparams = list_length(stmt->params);
int nexpected = list_length(paramtypes);
- ListCell *l, *l2;
+ ListCell *l,
+ *l2;
int i = 1;
if (nparams != nexpected)
if (pstate->p_hasSubLinks)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot use subquery in EXECUTE parameter")));
+ errmsg("cannot use subquery in EXECUTE parameter")));
if (pstate->p_hasAggs)
ereport(ERROR,
(errcode(ERRCODE_GROUPING_ERROR),
- errmsg("cannot use aggregate function in EXECUTE parameter")));
+ errmsg("cannot use aggregate function in EXECUTE parameter")));
given_type_id = exprType(expr);
if (qry->hasAggs)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE is not allowed with aggregate functions")));
+ errmsg("SELECT FOR UPDATE is not allowed with aggregate functions")));
}
/*
rte->requiredPerms |= ACL_SELECT_FOR_UPDATE;
break;
case RTE_SUBQUERY:
+
/*
* FOR UPDATE of subquery is propagated to subquery's
* rels
switch (rte->rtekind)
{
case RTE_RELATION:
- if (!list_member_int(rowMarks, i)) /* avoid duplicates */
+ if (!list_member_int(rowMarks, i)) /* avoid duplicates */
rowMarks = lappend_int(rowMarks, i);
rte->requiredPerms |= ACL_SELECT_FOR_UPDATE;
break;
case RTE_SUBQUERY:
+
/*
* FOR UPDATE of subquery is propagated to
* subquery's rels
break;
case RTE_JOIN:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE cannot be applied to a join")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE cannot be applied to a join")));
break;
case RTE_SPECIAL:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE cannot be applied to NEW or OLD")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE cannot be applied to NEW or OLD")));
break;
case RTE_FUNCTION:
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("SELECT FOR UPDATE cannot be applied to a function")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("SELECT FOR UPDATE cannot be applied to a function")));
break;
default:
elog(ERROR, "unrecognized RTE type: %d",
case T_IndexStmt:
{
- IndexStmt *elp = (IndexStmt *) element;
+ IndexStmt *elp = (IndexStmt *) element;
setSchemaName(cxt.schemaname, &elp->relation->schemaname);
cxt.indexes = lappend(cxt.indexes, element);
if (param->paramtype != context->paramTypes[paramno - 1])
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_PARAMETER),
- errmsg("could not determine data type of parameter $%d",
- paramno)));
+ errmsg("could not determine data type of parameter $%d",
+ paramno)));
}
return false;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.64 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_agg.c,v 1.65 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Found an ungrouped local variable; generate error message */
Assert(var->varno > 0 &&
- (int) var->varno <= list_length(context->pstate->p_rtable));
+ (int) var->varno <= list_length(context->pstate->p_rtable));
rte = rt_fetch(var->varno, context->pstate->p_rtable);
attname = get_rte_attribute_name(rte, var->varattno);
if (context->sublevels_up == 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.135 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_clause.c,v 1.136 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If we find an explicit reference to the rel later during parse
* analysis, scanRTEForColumn will add the ACL_SELECT bit back again.
- * That can't happen for INSERT but it is possible for UPDATE and DELETE.
+ * That can't happen for INSERT but it is possible for UPDATE and
+ * DELETE.
*/
rte->requiredPerms = requiredPerms;
{
List *new_colnames = NIL;
List *new_colvars = NIL;
- ListCell *lnames, *lvars;
+ ListCell *lnames,
+ *lvars;
Assert(list_length(src_colnames) == list_length(src_colvars));
transformJoinUsingClause(ParseState *pstate, List *leftVars, List *rightVars)
{
Node *result = NULL;
- ListCell *lvars, *rvars;
+ ListCell *lvars,
+ *rvars;
/*
* We cheat a little bit here by building an untransformed operator
*
* Small tweak for 7.4.3: ignore matches in upper query levels.
* This effectively changes the search order for bare names to
- * (1) local FROM variables, (2) local targetlist aliases,
- * (3) outer FROM variables, whereas before it was (1) (3) (2).
- * SQL92 and SQL99 do not allow GROUPing BY an outer reference,
- * so this breaks no cases that are legal per spec, and it
- * seems a more self-consistent behavior.
+ * (1) local FROM variables, (2) local targetlist aliases, (3)
+ * outer FROM variables, whereas before it was (1) (3) (2).
+ * SQL92 and SQL99 do not allow GROUPing BY an outer
+ * reference, so this breaks no cases that are legal per spec,
+ * and it seems a more self-consistent behavior.
*/
if (colNameToVar(pstate, name, true) != NULL)
name = NULL;
if (!equal(target_result->expr, tle->expr))
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_COLUMN),
- /* translator: first %s is name of a SQL construct, eg ORDER BY */
+
+ /*
+ * translator: first %s is name of a SQL
+ * construct, eg ORDER BY
+ */
errmsg("%s \"%s\" is ambiguous",
clauseText[clause], name)));
}
else
{
*sortClause = addTargetToSortList(pstate, tle,
- *sortClause, *targetlist,
+ *sortClause, *targetlist,
SORTBY_ASC, NIL, true);
/*
addAllTargetsToSortList(ParseState *pstate, List *sortlist,
List *targetlist, bool resolveUnknown)
{
- ListCell *l;
+ ListCell *l;
foreach(l, targetlist)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.122 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_coerce.c,v 2.123 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *coerce_type_typmod(Node *node,
- Oid targetTypeId, int32 targetTypMod,
- CoercionForm cformat, bool isExplicit,
- bool hideInputCoercion);
+ Oid targetTypeId, int32 targetTypMod,
+ CoercionForm cformat, bool isExplicit,
+ bool hideInputCoercion);
static void hide_coercion_node(Node *node);
static Node *build_coercion_expression(Node *node, Oid funcId,
- Oid targetTypeId, int32 targetTypMod,
- CoercionForm cformat, bool isExplicit);
+ Oid targetTypeId, int32 targetTypMod,
+ CoercionForm cformat, bool isExplicit);
static Node *coerce_record_to_complex(ParseState *pstate, Node *node,
- Oid targetTypeId,
- CoercionContext ccontext,
- CoercionForm cformat);
+ Oid targetTypeId,
+ CoercionContext ccontext,
+ CoercionForm cformat);
/*
CoercionContext ccontext,
CoercionForm cformat)
{
- Node *result;
+ Node *result;
if (!can_coerce_type(1, &exprtype, &targettype, ccontext))
return NULL;
/*
* If the target is a fixed-length type, it may need a length coercion
- * as well as a type coercion. If we find ourselves adding both,
+ * as well as a type coercion. If we find ourselves adding both,
* force the inner coercion node to implicit display form.
*/
result = coerce_type_typmod(result,
* Generate an expression tree representing run-time
* application of the conversion function. If we are dealing
* with a domain target type, the conversion function will
- * yield the base type (and we assume targetTypeMod must be -1).
+ * yield the base type (and we assume targetTypeMod must be
+ * -1).
*/
Oid baseTypeId = getBaseType(targetTypeId);
result = build_coercion_expression(node, funcId,
baseTypeId, targetTypeMod,
cformat,
- (cformat != COERCE_IMPLICIT_CAST));
+ (cformat != COERCE_IMPLICIT_CAST));
/*
* If domain, coerce to the domain type and relabel with
continue;
/*
- * If input is RECORD and target is a composite type, assume
- * we can coerce (may need tighter checking here)
+ * If input is RECORD and target is a composite type, assume we
+ * can coerce (may need tighter checking here)
*/
if (inputTypeId == RECORDOID &&
ISCOMPLEX(targetTypeId))
* Mark a coercion node as IMPLICIT so it will never be displayed by
* ruleutils.c. We use this when we generate a nest of coercion nodes
* to implement what is logically one conversion; the inner nodes are
- * forced to IMPLICIT_CAST format. This does not change their semantics,
+ * forced to IMPLICIT_CAST format. This does not change their semantics,
* only display behavior.
*
* It is caller error to call this on something that doesn't have a
procstruct = (Form_pg_proc) GETSTRUCT(tp);
/*
- * Asserts essentially check that function is a legal coercion function.
- * We can't make the seemingly obvious tests on prorettype and
- * proargtypes[0], because of various binary-compatibility cases.
+ * Asserts essentially check that function is a legal coercion
+ * function. We can't make the seemingly obvious tests on prorettype
+ * and proargtypes[0], because of various binary-compatibility cases.
*/
/* Assert(targetTypeId == procstruct->prorettype); */
Assert(!procstruct->proretset);
CoercionContext ccontext,
CoercionForm cformat)
{
- RowExpr *rowexpr;
+ RowExpr *rowexpr;
TupleDesc tupdesc;
List *args = NIL;
List *newargs;
else if (node && IsA(node, Var) &&
((Var *) node)->varattno == InvalidAttrNumber)
{
- int rtindex = ((Var *) node)->varno;
- int sublevels_up = ((Var *) node)->varlevelsup;
- List *rtable;
+ int rtindex = ((Var *) node)->varno;
+ int sublevels_up = ((Var *) node)->varlevelsup;
+ List *rtable;
rtable = GetLevelNRangeTable(pstate, sublevels_up);
expandRTE(rtable, rtindex, sublevels_up, false, NULL, &args);
arg = list_head(args);
for (i = 0; i < tupdesc->natts; i++)
{
- Node *expr;
- Oid exprtype;
+ Node *expr;
+ Oid exprtype;
/* Fill in NULLs for dropped columns in rowtype */
if (tupdesc->attrs[i]->attisdropped)
{
/*
- * can't use atttypid here, but it doesn't really matter
- * what type the Const claims to be.
+ * can't use atttypid here, but it doesn't really matter what
+ * type the Const claims to be.
*/
newargs = lappend(newargs, makeNullConst(INT4OID));
continue;
format_type_be(targetTypeId)),
errdetail("Cannot cast type %s to %s in column %d.",
format_type_be(exprtype),
- format_type_be(tupdesc->attrs[i]->atttypid),
+ format_type_be(tupdesc->attrs[i]->atttypid),
ucolno)));
newargs = lappend(newargs, expr);
ucolno++;
*/
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
+
/*
* translator: first %s is name of a SQL construct, eg
* CASE
if (OidIsValid(elem_typeid) && actual_type != elem_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyelement\" are not all alike"),
+ errmsg("arguments declared \"anyelement\" are not all alike"),
errdetail("%s versus %s",
format_type_be(elem_typeid),
format_type_be(actual_type))));
if (OidIsValid(array_typeid) && actual_type != array_typeid)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("arguments declared \"anyarray\" are not all alike"),
+ errmsg("arguments declared \"anyarray\" are not all alike"),
errdetail("%s versus %s",
format_type_be(array_typeid),
format_type_be(actual_type))));
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(elem_typeid))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(elem_typeid))));
}
return array_typeid;
}
if (!OidIsValid(array_typeid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("could not find array type for data type %s",
- format_type_be(context_actual_type))));
+ errmsg("could not find array type for data type %s",
+ format_type_be(context_actual_type))));
return array_typeid;
}
}
{
/*
* If there's no pg_cast entry, perhaps we are dealing with a pair
- * of array types. If so, and if the element types have a suitable
- * cast, use array_type_coerce() or array_type_length_coerce().
+ * of array types. If so, and if the element types have a
+ * suitable cast, use array_type_coerce() or
+ * array_type_length_coerce().
*/
Oid targetElemType;
Oid sourceElemType;
else
{
/* does the function take a typmod arg? */
- Oid argtypes[FUNC_MAX_ARGS];
- int nargs;
+ Oid argtypes[FUNC_MAX_ARGS];
+ int nargs;
(void) get_func_signature(elemfuncid, argtypes, &nargs);
if (nargs > 1)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.175 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_expr.c,v 1.176 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *transformColumnRef(ParseState *pstate, ColumnRef *cref);
static Node *transformWholeRowRef(ParseState *pstate, char *schemaname,
- char *relname);
+ char *relname);
static Node *transformIndirection(ParseState *pstate, Node *basenode,
List *indirection);
static Node *typecast_expression(ParseState *pstate, Node *expr,
TypeName *typename);
static Node *make_row_op(ParseState *pstate, List *opname,
- Node *ltree, Node *rtree);
+ Node *ltree, Node *rtree);
static Node *make_row_distinct_op(ParseState *pstate, List *opname,
- Node *ltree, Node *rtree);
+ Node *ltree, Node *rtree);
static Expr *make_distinct_op(ParseState *pstate, List *opname,
- Node *ltree, Node *rtree);
+ Node *ltree, Node *rtree);
/*
}
case T_A_Indirection:
{
- A_Indirection *ind = (A_Indirection *) expr;
+ A_Indirection *ind = (A_Indirection *) expr;
result = transformExpr(pstate, ind->arg);
result = transformIndirection(pstate, result,
*/
if (Transform_null_equals &&
list_length(a->name) == 1 &&
- strcmp(strVal(linitial(a->name)), "=") == 0 &&
+ strcmp(strVal(linitial(a->name)), "=") == 0 &&
(exprIsNullConstant(lexpr) ||
exprIsNullConstant(rexpr)))
{
{
/*
* Convert "row op subselect" into a
- * MULTIEXPR sublink. Formerly the grammar
- * did this, but now that a row construct is
- * allowed anywhere in expressions, it's
- * easier to do it here.
+ * MULTIEXPR sublink. Formerly the
+ * grammar did this, but now that a row
+ * construct is allowed anywhere in
+ * expressions, it's easier to do it here.
*/
- SubLink *s = (SubLink *) rexpr;
+ SubLink *s = (SubLink *) rexpr;
s->subLinkType = MULTIEXPR_SUBLINK;
s->lefthand = ((RowExpr *) lexpr)->args;
rexpr = coerce_to_boolean(pstate, rexpr, "AND");
result = (Node *) makeBoolExpr(AND_EXPR,
- list_make2(lexpr,
- rexpr));
+ list_make2(lexpr,
+ rexpr));
}
break;
case AEXPR_OR:
rexpr = coerce_to_boolean(pstate, rexpr, "OR");
result = (Node *) makeBoolExpr(OR_EXPR,
- list_make2(lexpr,
- rexpr));
+ list_make2(lexpr,
+ rexpr));
}
break;
case AEXPR_NOT:
rexpr = coerce_to_boolean(pstate, rexpr, "NOT");
result = (Node *) makeBoolExpr(NOT_EXPR,
- list_make1(rexpr));
+ list_make1(rexpr));
}
break;
case AEXPR_OP_ANY:
{
/* "row op row" */
result = make_row_distinct_op(pstate, a->name,
- lexpr, rexpr);
+ lexpr, rexpr);
}
else
{
rexpr = transformExpr(pstate, rexpr);
result = (Node *) make_distinct_op(pstate,
- a->name,
+ a->name,
lexpr,
rexpr);
}
* copy and then transform-in-place to avoid O(N^2)
* behavior from repeated lappend's.
*
- * XXX: repeated lappend() would no longer result in
- * O(n^2) behavior; worth reconsidering this design?
+ * XXX: repeated lappend() would no longer result in O(n^2)
+ * behavior; worth reconsidering this design?
*/
targs = list_copy(fn->args);
foreach(args, targs)
* (ignoring resjunk targets).
*/
if (tlist_item == NULL ||
- ((TargetEntry *) lfirst(tlist_item))->resdom->resjunk)
+ ((TargetEntry *) lfirst(tlist_item))->resdom->resjunk)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("subquery must return a column")));
+ errmsg("subquery must return a column")));
while ((tlist_item = lnext(tlist_item)) != NULL)
{
if (!((TargetEntry *) lfirst(tlist_item))->resdom->resjunk)
if (ll_item == NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("subquery has too many columns")));
+ errmsg("subquery has too many columns")));
lexpr = lfirst(ll_item);
ll_item = lnext(ll_item);
if (ll_item != NULL)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("subquery has too few columns")));
+ errmsg("subquery has too few columns")));
if (needNot)
{
{
/* shorthand form was specified, so expand... */
warg = (Node *) makeSimpleA_Expr(AEXPR_OP, "=",
- (Node *) placeholder,
+ (Node *) placeholder,
warg);
}
neww->expr = (Expr *) transformExpr(pstate, warg);
case T_RowExpr:
{
- RowExpr *r = (RowExpr *) expr;
- RowExpr *newr = makeNode(RowExpr);
+ RowExpr *r = (RowExpr *) expr;
+ RowExpr *newr = makeNode(RowExpr);
List *newargs = NIL;
ListCell *arg;
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated
- * as a single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a
+ * single multidimensional subscript operation.
*/
foreach(i, indirection)
{
- Node *n = lfirst(i);
+ Node *n = lfirst(i);
if (IsA(n, A_Indices))
- {
subscripts = lappend(subscripts, n);
- }
else
{
Assert(IsA(n, String));
if (subscripts)
result = (Node *) transformArraySubscripts(pstate,
result,
- exprType(result),
+ exprType(result),
InvalidOid,
-1,
subscripts,
}
/*
- * Try to find the name as a relation. Note that only
- * relations already entered into the rangetable will be
- * recognized.
+ * Try to find the name as a relation. Note that only
+ * relations already entered into the rangetable will
+ * be recognized.
*
* This is a hack for backwards compatibility with
* PostQUEL-inspired syntax. The preferred form now
*/
node = transformWholeRowRef(pstate, NULL, name1);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name2)),
+ list_make1(makeString(name2)),
list_make1(node),
false, false, true);
}
/* Try it as a function call */
node = transformWholeRowRef(pstate, name1, name2);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name3)),
+ list_make1(makeString(name3)),
list_make1(node),
false, false, true);
}
/* Try it as a function call */
node = transformWholeRowRef(pstate, name2, name3);
node = ParseFuncOrColumn(pstate,
- list_make1(makeString(name4)),
+ list_make1(makeString(name4)),
list_make1(node),
false, false, true);
}
}
break;
default:
+
/*
- * RTE is a join or subselect. We represent this as a whole-row
- * Var of RECORD type. (Note that in most cases the Var will
- * be expanded to a RowExpr during planning, but that is not
- * our concern here.)
+ * RTE is a join or subselect. We represent this as a
+ * whole-row Var of RECORD type. (Note that in most cases the
+ * Var will be expanded to a RowExpr during planning, but that
+ * is not our concern here.)
*/
result = (Node *) makeVar(vnum,
InvalidAttrNumber,
make_row_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree)
{
Node *result = NULL;
- RowExpr *lrow,
+ RowExpr *lrow,
*rrow;
List *largs,
*rargs;
(strcmp(oprname, "<=") == 0) ||
(strcmp(oprname, ">") == 0) ||
(strcmp(oprname, ">=") == 0))
- {
boolop = AND_EXPR;
- }
else if (strcmp(oprname, "<>") == 0)
- {
boolop = OR_EXPR;
- }
else
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("operator %s is not supported for row expressions",
- oprname)));
- boolop = 0; /* keep compiler quiet */
+ errmsg("operator %s is not supported for row expressions",
+ oprname)));
+ boolop = 0; /* keep compiler quiet */
}
forboth(l, largs, r, rargs)
{
- Node *larg = (Node *) lfirst(l);
- Node *rarg = (Node *) lfirst(r);
- Node *cmp;
+ Node *larg = (Node *) lfirst(l);
+ Node *rarg = (Node *) lfirst(r);
+ Node *cmp;
cmp = (Node *) make_op(pstate, opname, larg, rarg);
cmp = coerce_to_boolean(pstate, cmp, "row comparison");
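
(Illustration, not part of the patch: the element-wise rule that make_row_op() builds above, mimicked on plain integer rows. Each column pair is compared with the row operator and the per-column results are combined with AND for the ordering operators shown (<, <=, >, >=) and with OR for <>.)

#include <stdbool.h>
#include <stdio.h>

/* (l1,...,ln) < (r1,...,rn)  modelled as  l1<r1 AND ... AND ln<rn */
static bool
toy_row_lt(const int *l, const int *r, int ncols)
{
	bool		result = true;

	for (int i = 0; i < ncols; i++)
		result = result && (l[i] < r[i]);
	return result;
}

/* (l1,...,ln) <> (r1,...,rn)  modelled as  l1<>r1 OR ... OR ln<>rn */
static bool
toy_row_ne(const int *l, const int *r, int ncols)
{
	bool		result = false;

	for (int i = 0; i < ncols; i++)
		result = result || (l[i] != r[i]);
	return result;
}

int
main(void)
{
	int			a[] = {1, 5};
	int			b[] = {2, 3};

	/* (1,5) < (2,3) is false under this element-wise rule: 5 < 3 fails */
	printf("lt=%d ne=%d\n", toy_row_lt(a, b, 2), toy_row_ne(a, b, 2));
	return 0;
}
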
Node *ltree, Node *rtree)
{
Node *result = NULL;
- RowExpr *lrow,
+ RowExpr *lrow,
*rrow;
List *largs,
*rargs;
forboth(l, largs, r, rargs)
{
- Node *larg = (Node *) lfirst(l);
- Node *rarg = (Node *) lfirst(r);
- Node *cmp;
+ Node *larg = (Node *) lfirst(l);
+ Node *rarg = (Node *) lfirst(r);
+ Node *cmp;
cmp = (Node *) make_distinct_op(pstate, opname, larg, rarg);
if (result == NULL)
static Expr *
make_distinct_op(ParseState *pstate, List *opname, Node *ltree, Node *rtree)
{
- Expr *result;
+ Expr *result;
result = make_op(pstate, opname, ltree, rtree);
if (((OpExpr *) result)->opresulttype != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+ errmsg("IS DISTINCT FROM requires = operator to yield boolean")));
+
/*
- * We rely on DistinctExpr and OpExpr being
- * same struct
+ * We rely on DistinctExpr and OpExpr being same struct
*/
NodeSetTag(result, T_DistinctExpr);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.173 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_func.c,v 1.174 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static Node *ParseComplexProjection(ParseState *pstate, char *funcname,
- Node *first_arg);
+ Node *first_arg);
static Oid **argtype_inherit(int nargs, Oid *argtypes);
static int find_inheritors(Oid relid, Oid **supervec);
first_arg);
if (retval)
return retval;
+
/*
- * If ParseComplexProjection doesn't recognize it as a projection,
- * just press on.
+ * If ParseComplexProjection doesn't recognize it as a
+ * projection, just press on.
*/
}
}
func_signature_string(funcname, nargs,
actual_arg_types)),
errhint("No function matches the given name and argument types. "
- "You may need to add explicit type casts.")));
+ "You may need to add explicit type casts.")));
}
/*
ListCell *queue_item;
/*
- * Begin the search at the relation itself, so add relid to the
- * queue.
+ * Begin the search at the relation itself, so add relid to the queue.
*/
queue = list_make1_oid(relid);
visited = NIL;
inhrel = heap_openr(InheritsRelationName, AccessShareLock);
/*
- * Use queue to do a breadth-first traversal of the inheritance
- * graph from the relid supplied up to the root. Notice that we
- * append to the queue inside the loop --- this is okay because
- * the foreach() macro doesn't advance queue_item until the next
- * loop iteration begins.
+ * Use queue to do a breadth-first traversal of the inheritance graph
+ * from the relid supplied up to the root. Notice that we append to
+ * the queue inside the loop --- this is okay because the foreach()
+ * macro doesn't advance queue_item until the next loop iteration
+ * begins.
*/
foreach(queue_item, queue)
{
- Oid this_relid = lfirst_oid(queue_item);
- ScanKeyData skey;
- HeapScanDesc inhscan;
- HeapTuple inhtup;
+ Oid this_relid = lfirst_oid(queue_item);
+ ScanKeyData skey;
+ HeapScanDesc inhscan;
+ HeapTuple inhtup;
/* If we've seen this relid already, skip it */
if (list_member_oid(visited, this_relid))
/*
* Okay, this is a not-yet-seen relid. Add it to the list of
* already-visited OIDs, then find all the types this relid
- * inherits from and add them to the queue. The one exception
- * is we don't add the original relation to 'visited'.
+ * inherits from and add them to the queue. The one exception is
+ * we don't add the original relation to 'visited'.
*/
if (queue_item != list_head(queue))
visited = lappend_oid(visited, this_relid);
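
(Illustration, not part of the patch and not the pg_inherits-scanning find_inheritors() itself: the breadth-first traversal pattern described in the comments above, with a hypothetical parent_of[] table standing in for the catalog. The queue is seeded with the relation itself, newly found parents are appended while iterating, and already-processed ids are skipped so diamond inheritance is visited only once.)

#include <stdbool.h>
#include <stdio.h>

#define MAX_RELS	16
#define MAX_PARENTS	2

/* parent_of[child][k] > 0 means "child inherits from that relation id" (toy data) */
static const int parent_of[MAX_RELS][MAX_PARENTS] = {
	[5] = {3, 4},				/* rel 5 inherits from 3 and 4 */
	[3] = {1, 0},				/* rel 3 inherits from 1 */
	[4] = {1, 0},				/* rel 4 inherits from 1, too */
};

static void
print_ancestors(int relid)
{
	int			queue[MAX_RELS * MAX_PARENTS];	/* ample for this toy example */
	bool		seen[MAX_RELS] = {false};
	int			head = 0,
				tail = 0;

	queue[tail++] = relid;		/* begin the search at the relation itself */

	while (head < tail)			/* appending during the loop is fine */
	{
		int			cur = queue[head++];

		if (seen[cur])
			continue;			/* diamond inheritance: process each id once */
		/* unlike the real code, the starting relation is marked too, for simplicity */
		seen[cur] = true;

		if (cur != relid)
			printf("ancestor: %d\n", cur);

		for (int k = 0; k < MAX_PARENTS; k++)
			if (parent_of[cur][k] > 0)
				queue[tail++] = parent_of[cur][k];
	}
}

int
main(void)
{
	print_ancestors(5);			/* prints 3, 4 and 1 (1 only once, despite the diamond) */
	return 0;
}
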
/*
* Special case for whole-row Vars so that we can resolve (foo.*).bar
- * even when foo is a reference to a subselect, join, or RECORD function.
- * A bonus is that we avoid generating an unnecessary FieldSelect; our
- * result can omit the whole-row Var and just be a Var for the selected
- * field.
+ * even when foo is a reference to a subselect, join, or RECORD
+ * function. A bonus is that we avoid generating an unnecessary
+ * FieldSelect; our result can omit the whole-row Var and just be a
+ * Var for the selected field.
*/
if (IsA(first_arg, Var) &&
((Var *) first_arg)->varattno == InvalidAttrNumber)
else if (relTypeId == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("could not identify column \"%s\" in record data type",
- attname)));
+ errmsg("could not identify column \"%s\" in record data type",
+ attname)));
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.85 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_node.c,v 1.86 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* element. If any of the items are double subscripts (lower:upper),
* then the subscript expression means an array slice operation. In
* this case, we supply a default lower bound of 1 for any items that
- * contain only a single subscript. We have to prescan the indirection
- * list to see if there are any double subscripts.
+ * contain only a single subscript. We have to prescan the
+ * indirection list to see if there are any double subscripts.
*/
foreach(idx, indirection)
{
/*
* If doing an array store, coerce the source value to the right type.
- * (This should agree with the coercion done by updateTargetListEntry.)
+ * (This should agree with the coercion done by
+ * updateTargetListEntry.)
*/
if (assignFrom != NULL)
{
" but expression is of type %s",
format_type_be(typeneeded),
format_type_be(typesource)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.79 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_oper.c,v 1.80 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Operator optup;
/*
- * Look for an "=" operator for the datatype. We require it to be
- * an exact or binary-compatible match, since most callers are not
+ * Look for an "=" operator for the datatype. We require it to be an
+ * exact or binary-compatible match, since most callers are not
* prepared to cope with adding any run-time type coercion steps.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_EQ_OPR);
*/
if (oproid == ARRAY_EQ_OP)
{
- Oid elem_type = get_element_type(argtype);
+ Oid elem_type = get_element_type(argtype);
if (OidIsValid(elem_type))
{
oproid = InvalidOid; /* element type has no "=" */
}
else
- oproid = InvalidOid; /* bogus array type? */
+ oproid = InvalidOid; /* bogus array type? */
}
if (OidIsValid(oproid))
Operator optup;
/*
- * Look for a "<" operator for the datatype. We require it to be
- * an exact or binary-compatible match, since most callers are not
+ * Look for a "<" operator for the datatype. We require it to be an
+ * exact or binary-compatible match, since most callers are not
* prepared to cope with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a "<"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and grouping
- * purposes.
+ * returned by equality_oper. This is critical for sorting and
+ * grouping purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_LT_OPR);
oproid = typentry->lt_opr;
*/
if (oproid == ARRAY_LT_OP)
{
- Oid elem_type = get_element_type(argtype);
+ Oid elem_type = get_element_type(argtype);
if (OidIsValid(elem_type))
{
oproid = InvalidOid; /* element type has no "<" */
}
else
- oproid = InvalidOid; /* bogus array type? */
+ oproid = InvalidOid; /* bogus array type? */
}
if (OidIsValid(oproid))
Operator optup;
/*
- * Look for a ">" operator for the datatype. We require it to be
- * an exact or binary-compatible match, since most callers are not
+ * Look for a ">" operator for the datatype. We require it to be an
+ * exact or binary-compatible match, since most callers are not
* prepared to cope with adding any run-time type coercion steps.
*
* Note: the search algorithm used by typcache.c ensures that if a ">"
* operator is returned, it will be consistent with the "=" operator
- * returned by equality_oper. This is critical for sorting and grouping
- * purposes.
+ * returned by equality_oper. This is critical for sorting and
+ * grouping purposes.
*/
typentry = lookup_type_cache(argtype, TYPECACHE_GT_OPR);
oproid = typentry->gt_opr;
*/
if (oproid == ARRAY_GT_OP)
{
- Oid elem_type = get_element_type(argtype);
+ Oid elem_type = get_element_type(argtype);
if (OidIsValid(elem_type))
{
oproid = InvalidOid; /* element type has no ">" */
}
else
- oproid = InvalidOid; /* bogus array type? */
+ oproid = InvalidOid; /* bogus array type? */
}
if (OidIsValid(oproid))
binary_oper_exact(Oid arg1, Oid arg2,
FuncCandidateList candidates)
{
- FuncCandidateList cand;
+ FuncCandidateList cand;
bool was_unknown = false;
/* Unspecified type for one of the arguments? then use the other */
if (was_unknown)
{
/* arg1 and arg2 are the same here, need only look at arg1 */
- Oid basetype = getBaseType(arg1);
+ Oid basetype = getBaseType(arg1);
if (basetype != arg1)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.99 2004/08/29 04:12:41 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_relation.c,v 1.100 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
RangeTblEntry *rte1, const char *aliasname1);
static bool isForUpdate(ParseState *pstate, char *refname);
static void expandRelation(Oid relid, Alias *eref,
- int rtindex, int sublevels_up,
- bool include_dropped,
- List **colnames, List **colvars);
+ int rtindex, int sublevels_up,
+ bool include_dropped,
+ List **colnames, List **colvars);
static int specialAttNum(const char *attname);
static void warnAutoRange(ParseState *pstate, RangeVar *relation);
return NULL;
if (IsA(nsnode, RangeTblRef))
{
- int varno = ((RangeTblRef *) nsnode)->rtindex;
+ int varno = ((RangeTblRef *) nsnode)->rtindex;
RangeTblEntry *rte = rt_fetch(varno, pstate->p_rtable);
if (strcmp(rte->eref->aliasname, refname) == 0)
* Scan the user column names (or aliases) for a match. Complain if
* multiple matches.
*
- * Note: eref->colnames may include entries for dropped columns,
- * but those will be empty strings that cannot match any legal SQL
+ * Note: eref->colnames may include entries for dropped columns, but
+ * those will be empty strings that cannot match any legal SQL
* identifier, so we don't bother to test for that case here.
*
* Should this somehow go wrong and we try to access a dropped column,
* we'll still catch it by virtue of the checks in
- * get_rte_attribute_type(), which is called by make_var(). That routine
- * has to do a cache lookup anyway, so the check there is cheap.
+ * get_rte_attribute_type(), which is called by make_var(). That
+ * routine has to do a cache lookup anyway, so the check there is
+ * cheap.
*/
foreach(c, rte->eref->colnames)
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_COLUMN_REFERENCE),
errmsg("table \"%s\" has %d columns available but %d columns specified",
- eref->aliasname, maxattrs - numdropped, numaliases)));
+ eref->aliasname, maxattrs - numdropped, numaliases)));
}
/*
rte->relid = RelationGetRelid(rel);
/*
- * Build the list of effective column names using user-supplied aliases
- * and/or actual column names.
+ * Build the list of effective column names using user-supplied
+ * aliases and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
rte->inFromCl = inFromCl;
rte->requiredPerms = ACL_SELECT;
- rte->checkAsUser = 0; /* not set-uid by default, either */
+ rte->checkAsUser = 0; /* not set-uid by default, either */
/*
* Add completed RTE to pstate's range table list, but not to join
rte->relid = relid;
/*
- * Build the list of effective column names using user-supplied aliases
- * and/or actual column names.
+ * Build the list of effective column names using user-supplied
+ * aliases and/or actual column names.
*/
rte->eref = makeAlias(refname, NIL);
buildRelationAliases(rel->rd_att, alias, rte->eref);
rte->inFromCl = inFromCl;
rte->requiredPerms = ACL_SELECT;
- rte->checkAsUser = 0; /* not set-uid by default, either */
+ rte->checkAsUser = 0; /* not set-uid by default, either */
/*
* Add completed RTE to pstate's range table list, but not to join
/* fill in any unspecified alias columns */
if (numaliases < list_length(colnames))
eref->colnames = list_concat(eref->colnames,
- list_copy_tail(colnames, numaliases));
+ list_copy_tail(colnames, numaliases));
rte->eref = eref;
case RTE_SUBQUERY:
{
/* Subquery RTE */
- ListCell *aliasp_item = list_head(rte->eref->colnames);
- ListCell *tlistitem;
+ ListCell *aliasp_item = list_head(rte->eref->colnames);
+ ListCell *tlistitem;
varattno = 0;
foreach(tlistitem, rte->subquery->targetList)
*/
if (colnames)
*colnames = lappend(*colnames,
- linitial(rte->eref->colnames));
+ linitial(rte->eref->colnames));
if (colvars)
{
case RTE_JOIN:
{
/* Join RTE */
- ListCell *colname;
- ListCell *aliasvar;
+ ListCell *colname;
+ ListCell *aliasvar;
Assert(list_length(rte->eref->colnames) == list_length(rte->joinaliasvars));
varattno = 0;
- forboth (colname, rte->eref->colnames, aliasvar, rte->joinaliasvars)
+ forboth(colname, rte->eref->colnames, aliasvar, rte->joinaliasvars)
{
varattno++;
{
if (colnames)
*colnames = lappend(*colnames,
- makeString(pstrdup("")));
+ makeString(pstrdup("")));
if (colvars)
{
/*
* can't use atttypid here, but it doesn't
- * really matter what type the Const claims to
- * be.
+ * really matter what type the Const
+ * claims to be.
*/
*colvars = lappend(*colvars,
- makeNullConst(INT4OID));
+ makeNullConst(INT4OID));
}
}
continue;
if (colvars)
{
/*
- * can't use atttypid here, but it doesn't really matter
- * what type the Const claims to be.
+ * can't use atttypid here, but it doesn't really
+ * matter what type the Const claims to be.
*/
*colvars = lappend(*colvars, makeNullConst(INT4OID));
}
te_list = lappend(te_list, te);
}
- Assert(name == NULL && var == NULL); /* lists not the same length? */
+ Assert(name == NULL && var == NULL); /* lists not the same
+ * length? */
return te_list;
}
{
case RTE_RELATION:
{
- /* Plain relation RTE --- get the attribute's catalog entry */
+ /*
+ * Plain relation RTE --- get the attribute's catalog
+ * entry
+ */
HeapTuple tp;
Form_pg_attribute att_tup;
case RTE_JOIN:
{
/*
- * A join RTE would not have dropped columns when constructed,
- * but one in a stored rule might contain columns that were
- * dropped from the underlying tables, if said columns are
- * nowhere explicitly referenced in the rule. So we have to
- * recursively look at the referenced column.
+ * A join RTE would not have dropped columns when
+ * constructed, but one in a stored rule might contain
+ * columns that were dropped from the underlying tables,
+ * if said columns are nowhere explicitly referenced in
+ * the rule. So we have to recursively look at the
+ * referenced column.
*/
- Var *aliasvar;
+ Var *aliasvar;
if (attnum <= 0 ||
attnum > list_length(rte->joinaliasvars))
elog(ERROR, "invalid varattno %d", attnum);
aliasvar = (Var *) list_nth(rte->joinaliasvars, attnum - 1);
+
/*
* If the list item isn't a simple Var, then it must
* represent a merged column, ie a USING column, and so it
else
result = get_rte_attribute_is_dropped(rtable,
aliasvar->varno,
- aliasvar->varattno);
+ aliasvar->varattno);
}
break;
case RTE_FUNCTION:
TargetEntry *
get_tle_by_resno(List *tlist, AttrNumber resno)
{
- ListCell *l;
+ ListCell *l;
foreach(l, tlist)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.124 2004/08/29 04:12:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_target.c,v 1.125 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Check for "something.*". Depending on the complexity of the
- * "something", the star could appear as the last name in ColumnRef,
- * or as the last indirection item in A_Indirection.
+ * "something", the star could appear as the last name in
+ * ColumnRef, or as the last indirection item in A_Indirection.
*/
if (IsA(res->val, ColumnRef))
{
}
else if (IsA(res->val, A_Indirection))
{
- A_Indirection *ind = (A_Indirection *) res->val;
- Node *lastitem = llast(ind->indirection);
+ A_Indirection *ind = (A_Indirection *) res->val;
+ Node *lastitem = llast(ind->indirection);
if (IsA(lastitem, String) &&
strcmp(strVal(lastitem), "*") == 0)
{
/* It is something.*, expand into multiple items */
p_target = list_concat(p_target,
- ExpandIndirectionStar(pstate, ind));
+ ExpandIndirectionStar(pstate, ind));
continue;
}
}
if (IsA(linitial(indirection), A_Indices))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot set an array element to DEFAULT")));
+ errmsg("cannot set an array element to DEFAULT")));
else
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
/*
* If there is indirection on the target column, prepare an array or
- * subfield assignment expression. This will generate a new column value
- * that the source value has been inserted into, which can then be placed
- * in the new tuple constructed by INSERT or UPDATE.
+ * subfield assignment expression. This will generate a new column
+ * value that the source value has been inserted into, which can then
+ * be placed in the new tuple constructed by INSERT or UPDATE.
*/
if (indirection)
{
if (pstate->p_is_insert)
{
/*
- * The command is INSERT INTO table (col.something) ...
- * so there is not really a source value to work with.
- * Insert a NULL constant as the source value.
+ * The command is INSERT INTO table (col.something) ... so
+ * there is not really a source value to work with. Insert a
+ * NULL constant as the source value.
*/
colVar = (Node *) makeNullConst(attrtype);
}
colname,
format_type_be(attrtype),
format_type_be(type_id)),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
/*
*/
resnode->restype = attrtype;
resnode->restypmod = attrtypmod;
+
/*
* Set the resno to identify the target column --- the rewriter and
- * planner depend on this. We also set the resname to identify the
+ * planner depend on this. We also set the resname to identify the
* target column, but this is only for debugging purposes; it should
* not be relied on. (In particular, it might be out of date in a
* stored rule.)
/*
* We have to split any field-selection operations apart from
- * subscripting. Adjacent A_Indices nodes have to be treated
- * as a single multidimensional subscript operation.
+ * subscripting. Adjacent A_Indices nodes have to be treated as a
+ * single multidimensional subscript operation.
*/
for_each_cell(i, indirection)
{
- Node *n = lfirst(i);
+ Node *n = lfirst(i);
if (IsA(n, A_Indices))
{
else
{
FieldStore *fstore;
- Oid typrelid;
- AttrNumber attnum;
- Oid fieldTypeId;
- int32 fieldTypMod;
+ Oid typrelid;
+ AttrNumber attnum;
+ Oid fieldTypeId;
+ int32 fieldTypMod;
Assert(IsA(n, String));
/* process subscripts before this field selection */
if (subscripts)
{
- Oid elementTypeId = transformArrayType(targetTypeId);
- Oid typeNeeded = isSlice ? targetTypeId : elementTypeId;
+ Oid elementTypeId = transformArrayType(targetTypeId);
+ Oid typeNeeded = isSlice ? targetTypeId : elementTypeId;
/* recurse to create appropriate RHS for array assign */
rhs = transformAssignmentIndirection(pstate,
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" not found in data type %s",
- strVal(n), format_type_be(targetTypeId))));
+ strVal(n), format_type_be(targetTypeId))));
if (attnum < 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
/* process trailing subscripts, if any */
if (subscripts)
{
- Oid elementTypeId = transformArrayType(targetTypeId);
- Oid typeNeeded = isSlice ? targetTypeId : elementTypeId;
+ Oid elementTypeId = transformArrayType(targetTypeId);
+ Oid typeNeeded = isSlice ? targetTypeId : elementTypeId;
/* recurse to create appropriate RHS for array assign */
rhs = transformAssignmentIndirection(pstate,
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
else
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
targetName,
format_type_be(targetTypeId),
format_type_be(exprType(rhs))),
- errhint("You will need to rewrite or cast the expression.")));
+ errhint("You will need to rewrite or cast the expression.")));
}
return result;
if (list_member_int(*attrnos, attrno))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
wholecols = lappend_int(wholecols, attrno);
}
else
if (list_member_int(wholecols, attrno))
ereport(ERROR,
(errcode(ERRCODE_DUPLICATE_COLUMN),
- errmsg("column \"%s\" specified more than once",
- name)));
+ errmsg("column \"%s\" specified more than once",
+ name)));
}
*attrnos = lappend_int(*attrnos, attrno);
relname = strVal(lsecond(fields));
break;
case 4:
- {
- char *name1 = strVal(linitial(fields));
-
- /*
- * We check the catalog name and then ignore
- * it.
- */
- if (strcmp(name1, get_database_name(MyDatabaseId)) != 0)
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cross-database references are not implemented: %s",
- NameListToString(fields))));
- schemaname = strVal(lsecond(fields));
- relname = strVal(lthird(fields));
- break;
- }
+ {
+ char *name1 = strVal(linitial(fields));
+
+ /*
+ * We check the catalog name and then ignore it.
+ */
+ if (strcmp(name1, get_database_name(MyDatabaseId)) != 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cross-database references are not implemented: %s",
+ NameListToString(fields))));
+ schemaname = strVal(lsecond(fields));
+ relname = strVal(lthird(fields));
+ break;
+ }
default:
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
else
{
elog(ERROR, "unrecognized node type: %d", (int) nodeTag(n));
- rtindex = 0; /* keep compiler quiet */
+ rtindex = 0; /* keep compiler quiet */
}
/*
/* find last field name, if any, ignoring "*" */
foreach(l, ((ColumnRef *) node)->fields)
{
- Node *i = lfirst(l);
+ Node *i = lfirst(l);
if (strcmp(strVal(i), "*") != 0)
fname = strVal(i);
/* find last field name, if any, ignoring "*" */
foreach(l, ind->indirection)
{
- Node *i = lfirst(l);
+ Node *i = lfirst(l);
if (IsA(i, String) &&
strcmp(strVal(i), "*") != 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.70 2004/08/29 04:12:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/parse_type.c,v 1.71 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (attnum == InvalidAttrNumber)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("column \"%s\" of relation \"%s\" does not exist",
- field, rel->relname)));
+ errmsg("column \"%s\" of relation \"%s\" does not exist",
+ field, rel->relname)));
restype = get_atttype(relid, attnum);
/* this construct should never have an array indicator */
const char *str = (const char *) arg;
errcontext("invalid type name \"%s\"", str);
+
/*
- * Currently we just suppress any syntax error position report,
- * rather than transforming to an "internal query" error. It's
- * unlikely that a type name is complex enough to need positioning.
+ * Currently we just suppress any syntax error position report, rather
+ * than transforming to an "internal query" error. It's unlikely that
+ * a type name is complex enough to need positioning.
*/
errposition(0);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.27 2004/08/29 04:12:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/parser/scansup.c,v 1.28 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int i;
result = palloc(len + 1);
+
/*
- * SQL99 specifies Unicode-aware case normalization, which we don't yet
- * have the infrastructure for. Instead we use tolower() to provide a
- * locale-aware translation. However, there are some locales where this
- * is not right either (eg, Turkish may do strange things with 'i' and
- * 'I'). Our current compromise is to use tolower() for characters with
- * the high bit set, and use an ASCII-only downcasing for 7-bit
- * characters.
+ * SQL99 specifies Unicode-aware case normalization, which we don't
+ * yet have the infrastructure for. Instead we use tolower() to
+ * provide a locale-aware translation. However, there are some
+ * locales where this is not right either (eg, Turkish may do strange
+ * things with 'i' and 'I'). Our current compromise is to use
+ * tolower() for characters with the high bit set, and use an
+ * ASCII-only downcasing for 7-bit characters.
*/
for (i = 0; i < len; i++)
{
- unsigned char ch = (unsigned char) ident[i];
+ unsigned char ch = (unsigned char) ident[i];
if (ch >= 'A' && ch <= 'Z')
ch += 'a' - 'A';
{
if (len >= NAMEDATALEN)
{
- len = pg_mbcliplen(ident, len, NAMEDATALEN-1);
+ len = pg_mbcliplen(ident, len, NAMEDATALEN - 1);
if (warn)
ereport(NOTICE,
(errcode(ERRCODE_NAME_TOO_LONG),
- errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
- ident, len, ident)));
+ errmsg("identifier \"%s\" will be truncated to \"%.*s\"",
+ ident, len, ident)));
ident[len] = '\0';
}
}
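
A minimal standalone sketch of the compromise described in the comment above: 7-bit ASCII letters are downcased directly, and only characters with the high bit set are handed to tolower(). This is illustrative only (it uses malloc rather than palloc and omits the encoding-aware truncation), not the backend function itself.

#include <ctype.h>
#include <stdlib.h>

/*
 * Sketch: ASCII-only downcasing, with tolower() used only for high-bit
 * characters (locale-dependent, as the comment above notes).
 */
static char *
downcase_identifier_sketch(const char *ident, int len)
{
	char	   *result = malloc(len + 1);
	int			i;

	for (i = 0; i < len; i++)
	{
		unsigned char ch = (unsigned char) ident[i];

		if (ch >= 'A' && ch <= 'Z')
			ch += 'a' - 'A';
		else if (ch >= 0x80 && isupper(ch))
			ch = tolower(ch);
		result[i] = (char) ch;
	}
	result[i] = '\0';
	return result;
}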
return (pid == -1 ? -1 : pstat);
}
-#endif /* OS X < 10.3 */
+#endif /* OS X < 10.3 */
#define pg_dlclose dlclose
#define pg_dlerror dlerror
-char* dlerror(void);
-int dlclose(void *handle);
-void* dlsym(void *handle, const char *symbol);
-void* dlopen(const char *path, int mode);
+char *dlerror(void);
+int dlclose(void *handle);
+void *dlsym(void *handle, const char *symbol);
+void *dlopen(const char *path, int mode);
#endif /* PORT_PROTOS_H */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.14 2004/08/29 04:12:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/ipc_test.c,v 1.15 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
void
-on_shmem_exit(void (*function) (int code, Datum arg), Datum arg)
+ on_shmem_exit(void (*function) (int code, Datum arg), Datum arg)
{
if (on_shmem_exit_index >= MAX_ON_EXITS)
elog(FATAL, "out of on_shmem_exit slots");
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.35 2004/08/29 04:12:42 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/sysv_shmem.c,v 1.36 2004/08/29 05:06:44 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
ereport(FATAL,
(errmsg("could not create shared memory segment: %m"),
- errdetail("Failed system call was shmget(key=%lu, size=%u, 0%o).",
- (unsigned long) memKey, size,
- IPC_CREAT | IPC_EXCL | IPCProtection),
+ errdetail("Failed system call was shmget(key=%lu, size=%u, 0%o).",
+ (unsigned long) memKey, size,
+ IPC_CREAT | IPC_EXCL | IPCProtection),
(errno == EINVAL) ?
errhint("This error usually means that PostgreSQL's request for a shared memory "
"segment exceeded your kernel's SHMMAX parameter. You can either "
"reduce the request size or reconfigure the kernel with larger SHMMAX. "
- "To reduce the request size (currently %u bytes), reduce "
- "PostgreSQL's shared_buffers parameter (currently %d) and/or "
+ "To reduce the request size (currently %u bytes), reduce "
+ "PostgreSQL's shared_buffers parameter (currently %d) and/or "
"its max_connections parameter (currently %d).\n"
"If the request size is already small, it's possible that it is less than "
"your kernel's SHMMIN parameter, in which case raising the request size or "
/* If Exec case, just attach and return the pointer */
if (UsedShmemSegAddr != NULL && !makePrivate && IsUnderPostmaster)
{
- void* origUsedShmemSegAddr = UsedShmemSegAddr;
+ void *origUsedShmemSegAddr = UsedShmemSegAddr;
#ifdef __CYGWIN__
/* cygipc (currently) appears to not detach on exec. */
PGSharedMemoryDetach();
UsedShmemSegAddr = origUsedShmemSegAddr;
#endif
- elog(DEBUG3,"Attaching to %p",UsedShmemSegAddr);
+ elog(DEBUG3, "Attaching to %p", UsedShmemSegAddr);
hdr = PGSharedMemoryAttach((IpcMemoryKey) UsedShmemSegID, &shmid);
if (hdr == NULL)
elog(FATAL, "could not attach to proper memory at fixed address: shmget(key=%d, addr=%p) failed: %m",
(int) UsedShmemSegID, UsedShmemSegAddr);
if (hdr != origUsedShmemSegAddr)
- elog(FATAL,"attaching to shared mem returned unexpected address (got %p, expected %p)",
- hdr,UsedShmemSegAddr);
+ elog(FATAL, "attaching to shared mem returned unexpected address (got %p, expected %p)",
+ hdr, UsedShmemSegAddr);
UsedShmemSegAddr = hdr;
return hdr;
}
*
* Detach from the shared memory segment, if still attached. This is not
* intended for use by the process that originally created the segment
- * (it will have an on_shmem_exit callback registered to do that). Rather,
+ * (it will have an on_shmem_exit callback registered to do that). Rather,
* this is for subprocesses that have inherited an attachment and want to
* get rid of it.
*/
{
if ((shmdt(UsedShmemSegAddr) < 0)
#if (defined(EXEC_BACKEND) && defined(__CYGWIN__))
- /* Work-around for cygipc exec bug */
+ /* Work-around for cygipc exec bug */
&& shmdt(NULL) < 0
#endif
)
/*-------------------------------------------------------------------------
*
* error.c
- * Map win32 error codes to errno values
+ * Map win32 error codes to errno values
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.2 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/error.c,v 1.3 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
-static struct { DWORD winerr; int doserr;} doserrors[] =
+static struct
{
- { ERROR_INVALID_FUNCTION, EINVAL },
- { ERROR_FILE_NOT_FOUND, ENOENT },
- { ERROR_PATH_NOT_FOUND, ENOENT },
- { ERROR_TOO_MANY_OPEN_FILES, EMFILE },
- { ERROR_ACCESS_DENIED, EACCES },
- { ERROR_INVALID_HANDLE, EBADF },
- { ERROR_ARENA_TRASHED, ENOMEM },
- { ERROR_NOT_ENOUGH_MEMORY, ENOMEM },
- { ERROR_INVALID_BLOCK, ENOMEM },
- { ERROR_BAD_ENVIRONMENT, E2BIG },
- { ERROR_BAD_FORMAT, ENOEXEC },
- { ERROR_INVALID_ACCESS, EINVAL },
- { ERROR_INVALID_DATA, EINVAL },
- { ERROR_INVALID_DRIVE, ENOENT },
- { ERROR_CURRENT_DIRECTORY, EACCES },
- { ERROR_NOT_SAME_DEVICE, EXDEV },
- { ERROR_NO_MORE_FILES, ENOENT },
- { ERROR_LOCK_VIOLATION, EACCES },
- { ERROR_BAD_NETPATH, ENOENT },
- { ERROR_NETWORK_ACCESS_DENIED, EACCES },
- { ERROR_BAD_NET_NAME, ENOENT },
- { ERROR_FILE_EXISTS, EEXIST },
- { ERROR_CANNOT_MAKE, EACCES },
- { ERROR_FAIL_I24, EACCES },
- { ERROR_INVALID_PARAMETER, EINVAL },
- { ERROR_NO_PROC_SLOTS, EAGAIN },
- { ERROR_DRIVE_LOCKED, EACCES },
- { ERROR_BROKEN_PIPE, EPIPE },
- { ERROR_DISK_FULL, ENOSPC },
- { ERROR_INVALID_TARGET_HANDLE, EBADF },
- { ERROR_INVALID_HANDLE, EINVAL },
- { ERROR_WAIT_NO_CHILDREN, ECHILD },
- { ERROR_CHILD_NOT_COMPLETE, ECHILD },
- { ERROR_DIRECT_ACCESS_HANDLE, EBADF },
- { ERROR_NEGATIVE_SEEK, EINVAL },
- { ERROR_SEEK_ON_DEVICE, EACCES },
- { ERROR_DIR_NOT_EMPTY, ENOTEMPTY },
- { ERROR_NOT_LOCKED, EACCES },
- { ERROR_BAD_PATHNAME, ENOENT },
- { ERROR_MAX_THRDS_REACHED, EAGAIN },
- { ERROR_LOCK_FAILED, EACCES },
- { ERROR_ALREADY_EXISTS, EEXIST },
- { ERROR_FILENAME_EXCED_RANGE, ENOENT },
- { ERROR_NESTING_NOT_ALLOWED, EAGAIN },
- { ERROR_NOT_ENOUGH_QUOTA, ENOMEM }
+ DWORD winerr;
+ int doserr;
+} doserrors[] =
+
+{
+ {
+ ERROR_INVALID_FUNCTION, EINVAL
+ },
+ {
+ ERROR_FILE_NOT_FOUND, ENOENT
+ },
+ {
+ ERROR_PATH_NOT_FOUND, ENOENT
+ },
+ {
+ ERROR_TOO_MANY_OPEN_FILES, EMFILE
+ },
+ {
+ ERROR_ACCESS_DENIED, EACCES
+ },
+ {
+ ERROR_INVALID_HANDLE, EBADF
+ },
+ {
+ ERROR_ARENA_TRASHED, ENOMEM
+ },
+ {
+ ERROR_NOT_ENOUGH_MEMORY, ENOMEM
+ },
+ {
+ ERROR_INVALID_BLOCK, ENOMEM
+ },
+ {
+ ERROR_BAD_ENVIRONMENT, E2BIG
+ },
+ {
+ ERROR_BAD_FORMAT, ENOEXEC
+ },
+ {
+ ERROR_INVALID_ACCESS, EINVAL
+ },
+ {
+ ERROR_INVALID_DATA, EINVAL
+ },
+ {
+ ERROR_INVALID_DRIVE, ENOENT
+ },
+ {
+ ERROR_CURRENT_DIRECTORY, EACCES
+ },
+ {
+ ERROR_NOT_SAME_DEVICE, EXDEV
+ },
+ {
+ ERROR_NO_MORE_FILES, ENOENT
+ },
+ {
+ ERROR_LOCK_VIOLATION, EACCES
+ },
+ {
+ ERROR_BAD_NETPATH, ENOENT
+ },
+ {
+ ERROR_NETWORK_ACCESS_DENIED, EACCES
+ },
+ {
+ ERROR_BAD_NET_NAME, ENOENT
+ },
+ {
+ ERROR_FILE_EXISTS, EEXIST
+ },
+ {
+ ERROR_CANNOT_MAKE, EACCES
+ },
+ {
+ ERROR_FAIL_I24, EACCES
+ },
+ {
+ ERROR_INVALID_PARAMETER, EINVAL
+ },
+ {
+ ERROR_NO_PROC_SLOTS, EAGAIN
+ },
+ {
+ ERROR_DRIVE_LOCKED, EACCES
+ },
+ {
+ ERROR_BROKEN_PIPE, EPIPE
+ },
+ {
+ ERROR_DISK_FULL, ENOSPC
+ },
+ {
+ ERROR_INVALID_TARGET_HANDLE, EBADF
+ },
+ {
+ ERROR_INVALID_HANDLE, EINVAL
+ },
+ {
+ ERROR_WAIT_NO_CHILDREN, ECHILD
+ },
+ {
+ ERROR_CHILD_NOT_COMPLETE, ECHILD
+ },
+ {
+ ERROR_DIRECT_ACCESS_HANDLE, EBADF
+ },
+ {
+ ERROR_NEGATIVE_SEEK, EINVAL
+ },
+ {
+ ERROR_SEEK_ON_DEVICE, EACCES
+ },
+ {
+ ERROR_DIR_NOT_EMPTY, ENOTEMPTY
+ },
+ {
+ ERROR_NOT_LOCKED, EACCES
+ },
+ {
+ ERROR_BAD_PATHNAME, ENOENT
+ },
+ {
+ ERROR_MAX_THRDS_REACHED, EAGAIN
+ },
+ {
+ ERROR_LOCK_FAILED, EACCES
+ },
+ {
+ ERROR_ALREADY_EXISTS, EEXIST
+ },
+ {
+ ERROR_FILENAME_EXCED_RANGE, ENOENT
+ },
+ {
+ ERROR_NESTING_NOT_ALLOWED, EAGAIN
+ },
+ {
+ ERROR_NOT_ENOUGH_QUOTA, ENOMEM
+ }
};
-void _dosmaperr(unsigned long e)
+void
+_dosmaperr(unsigned long e)
{
- int i;
+ int i;
if (e == 0)
{
return;
}
- for (i=0; i<sizeof(doserrors)/sizeof(doserrors[0]); i++)
+ for (i = 0; i < sizeof(doserrors) / sizeof(doserrors[0]); i++)
{
if (doserrors[i].winerr == e)
{
errno = doserrors[i].doserr;
ereport(DEBUG5,
(errmsg_internal("Mapped win32 error code %i to %i",
- (int)e, errno)));
+ (int) e, errno)));
return;
}
}
ereport(DEBUG4,
(errmsg_internal("Unknown win32 error code: %i",
- (int)e)));
+ (int) e)));
errno = EINVAL;
return;
}
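
For context, the translation table above is normally consulted right after a failing Win32 call; a hedged usage sketch follows (the wrapper name is illustrative and not part of the patch, and DeleteFileA is used explicitly to sidestep UNICODE build differences).

#include <windows.h>

extern void _dosmaperr(unsigned long e);	/* the function above */

/* Illustrative wrapper: surface a Win32 failure through the usual errno idiom. */
static int
unlink_sketch(const char *path)
{
	if (!DeleteFileA(path))
	{
		_dosmaperr(GetLastError());		/* e.g. ERROR_FILE_NOT_FOUND -> ENOENT */
		return -1;
	}
	return 0;
}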
/*-------------------------------------------------------------------------
*
* security.c
- * Microsoft Windows Win32 Security Support Functions
+ * Microsoft Windows Win32 Security Support Functions
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.3 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/security.c,v 1.4 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int
pgwin32_is_admin(void)
{
- HANDLE AccessToken;
- char *InfoBuffer = NULL;
+ HANDLE AccessToken;
+ char *InfoBuffer = NULL;
PTOKEN_GROUPS Groups;
- DWORD InfoBufferSize;
- PSID AdministratorsSid;
- PSID PowerUsersSid;
- SID_IDENTIFIER_AUTHORITY NtAuthority = { SECURITY_NT_AUTHORITY };
- UINT x;
- BOOL success;
-
- if(!OpenProcessToken(GetCurrentProcess(),TOKEN_READ,&AccessToken))
+ DWORD InfoBufferSize;
+ PSID AdministratorsSid;
+ PSID PowerUsersSid;
+ SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
+ UINT x;
+ BOOL success;
+
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_READ, &AccessToken))
{
write_stderr("failed to open process token: %d\n",
- (int)GetLastError());
+ (int) GetLastError());
exit(1);
}
- if (GetTokenInformation(AccessToken,TokenGroups,NULL,0,&InfoBufferSize))
+ if (GetTokenInformation(AccessToken, TokenGroups, NULL, 0, &InfoBufferSize))
{
write_stderr("failed to get token information - got zero size!\n");
exit(1);
if (GetLastError() != ERROR_INSUFFICIENT_BUFFER)
{
write_stderr("failed to get token information: %d\n",
- (int)GetLastError());
+ (int) GetLastError());
exit(1);
}
if (!InfoBuffer)
{
write_stderr("failed to allocate %i bytes for token information!\n",
- (int)InfoBufferSize);
+ (int) InfoBufferSize);
exit(1);
}
- Groups = (PTOKEN_GROUPS)InfoBuffer;
+ Groups = (PTOKEN_GROUPS) InfoBuffer;
- if (!GetTokenInformation(AccessToken,TokenGroups,InfoBuffer,
+ if (!GetTokenInformation(AccessToken, TokenGroups, InfoBuffer,
InfoBufferSize, &InfoBufferSize))
{
write_stderr("failed to get token information: %d\n",
- (int)GetLastError());
+ (int) GetLastError());
exit(1);
}
CloseHandle(AccessToken);
- if(!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID,DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
- 0,&AdministratorsSid))
+ if (!AllocateAndInitializeSid(&NtAuthority, 2,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0,
+ 0, &AdministratorsSid))
{
write_stderr("failed to get SID for Administrators group: %d\n",
- (int)GetLastError());
+ (int) GetLastError());
exit(1);
}
if (!AllocateAndInitializeSid(&NtAuthority, 2,
- SECURITY_BUILTIN_DOMAIN_RID,DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
+ SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0,
0, &PowerUsersSid))
{
write_stderr("failed to get SID for PowerUsers group: %d\n",
- (int)GetLastError());
+ (int) GetLastError());
exit(1);
}
-
+
success = FALSE;
-
- for (x=0; x<Groups->GroupCount; x++)
+
+ for (x = 0; x < Groups->GroupCount; x++)
{
if (EqualSid(AdministratorsSid, Groups->Groups[x].Sid) ||
EqualSid(PowerUsersSid, Groups->Groups[x].Sid))
break;
}
}
-
+
free(InfoBuffer);
FreeSid(AdministratorsSid);
FreeSid(PowerUsersSid);
*
* 1) We are running as Local System (only used by services)
* 2) Our token contains SECURITY_SERVICE_RID (automatically added to the
- * process token by the SCM when starting a service)
+ * process token by the SCM when starting a service)
*
* Return values:
- * 0 = Not service
- * 1 = Service
- * -1 = Error
+ * 0 = Not service
+ * 1 = Service
+ * -1 = Error
*
* Note: we can't report errors via either ereport (we're called too early)
* or write_stderr (because that calls this). We are therefore reduced to
int
pgwin32_is_service(void)
{
- static int _is_service = -1;
- HANDLE AccessToken;
- UCHAR InfoBuffer[1024];
- PTOKEN_GROUPS Groups = (PTOKEN_GROUPS)InfoBuffer;
- PTOKEN_USER User = (PTOKEN_USER)InfoBuffer;
- DWORD InfoBufferSize;
- PSID ServiceSid;
- PSID LocalSystemSid;
- SID_IDENTIFIER_AUTHORITY NtAuthority = { SECURITY_NT_AUTHORITY };
- UINT x;
+ static int _is_service = -1;
+ HANDLE AccessToken;
+ UCHAR InfoBuffer[1024];
+ PTOKEN_GROUPS Groups = (PTOKEN_GROUPS) InfoBuffer;
+ PTOKEN_USER User = (PTOKEN_USER) InfoBuffer;
+ DWORD InfoBufferSize;
+ PSID ServiceSid;
+ PSID LocalSystemSid;
+ SID_IDENTIFIER_AUTHORITY NtAuthority = {SECURITY_NT_AUTHORITY};
+ UINT x;
/* Only check the first time */
if (_is_service != -1)
return _is_service;
-
- if (!OpenProcessToken(GetCurrentProcess(),TOKEN_READ,&AccessToken)) {
- fprintf(stderr,"failed to open process token: %d\n",
- (int)GetLastError());
+
+ if (!OpenProcessToken(GetCurrentProcess(), TOKEN_READ, &AccessToken))
+ {
+ fprintf(stderr, "failed to open process token: %d\n",
+ (int) GetLastError());
return -1;
}
/* First check for local system */
- if (!GetTokenInformation(AccessToken,TokenUser,InfoBuffer,1024,&InfoBufferSize)) {
- fprintf(stderr,"failed to get token information: %d\n",
- (int)GetLastError());
+ if (!GetTokenInformation(AccessToken, TokenUser, InfoBuffer, 1024, &InfoBufferSize))
+ {
+ fprintf(stderr, "failed to get token information: %d\n",
+ (int) GetLastError());
return -1;
}
-
- if (!AllocateAndInitializeSid(&NtAuthority,1,
- SECURITY_LOCAL_SYSTEM_RID,0,0,0,0,0,0,0,
- &LocalSystemSid)) {
- fprintf(stderr,"failed to get SID for local system account\n");
+
+ if (!AllocateAndInitializeSid(&NtAuthority, 1,
+ SECURITY_LOCAL_SYSTEM_RID, 0, 0, 0, 0, 0, 0, 0,
+ &LocalSystemSid))
+ {
+ fprintf(stderr, "failed to get SID for local system account\n");
CloseHandle(AccessToken);
return -1;
}
- if (EqualSid(LocalSystemSid, User->User.Sid)) {
+ if (EqualSid(LocalSystemSid, User->User.Sid))
+ {
FreeSid(LocalSystemSid);
CloseHandle(AccessToken);
_is_service = 1;
FreeSid(LocalSystemSid);
/* Now check for group SID */
- if (!GetTokenInformation(AccessToken,TokenGroups,InfoBuffer,1024,&InfoBufferSize)) {
- fprintf(stderr,"failed to get token information: %d\n",
- (int)GetLastError());
+ if (!GetTokenInformation(AccessToken, TokenGroups, InfoBuffer, 1024, &InfoBufferSize))
+ {
+ fprintf(stderr, "failed to get token information: %d\n",
+ (int) GetLastError());
return -1;
}
- if (!AllocateAndInitializeSid(&NtAuthority,1,
- SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
- &ServiceSid)) {
- fprintf(stderr,"failed to get SID for service group\n");
+ if (!AllocateAndInitializeSid(&NtAuthority, 1,
+ SECURITY_SERVICE_RID, 0, 0, 0, 0, 0, 0, 0,
+ &ServiceSid))
+ {
+ fprintf(stderr, "failed to get SID for service group\n");
CloseHandle(AccessToken);
return -1;
}
_is_service = 0;
for (x = 0; x < Groups->GroupCount; x++)
{
- if (EqualSid(ServiceSid, Groups->Groups[x].Sid))
+ if (EqualSid(ServiceSid, Groups->Groups[x].Sid))
{
_is_service = 1;
break;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.7 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/sema.c,v 1.8 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (sops[0].sem_op == -1)
{
DWORD ret;
- HANDLE wh[2];
+ HANDLE wh[2];
wh[0] = cur_handle;
wh[1] = pgwin32_signal_event;
- ret = WaitForMultipleObjects(2, wh, FALSE, (sops[0].sem_flg & IPC_NOWAIT)?0:INFINITE);
+ ret = WaitForMultipleObjects(2, wh, FALSE, (sops[0].sem_flg & IPC_NOWAIT) ? 0 : INFINITE);
if (ret == WAIT_OBJECT_0)
{
sem_counts[sops[0].sem_num]--;
return 0;
}
- else if (ret == WAIT_OBJECT_0+1)
+ else if (ret == WAIT_OBJECT_0 + 1)
{
/* Signal event is set - we have a signal to deliver */
pgwin32_dispatch_queued_signals();
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.7 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/shmem.c,v 1.8 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int
shmdt(const void *shmaddr)
{
- if (UnmapViewOfFile((LPCVOID*)shmaddr))
+ if (UnmapViewOfFile((LPCVOID *) shmaddr))
return 0;
else
return -1;
/* TODO -- shmat needs to count # attached to shared mem */
void *lpmem = MapViewOfFileEx((HANDLE) memId,
FILE_MAP_WRITE | FILE_MAP_READ,
- 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */, shmaddr);
+ 0, 0, /* (DWORD)pshmdsc->segsize */ 0 /* s_segsize */ , shmaddr);
if (lpmem == NULL)
{
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.6 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/signal.c,v 1.7 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static BOOL WINAPI pg_console_handler(DWORD dwCtrlType);
/* Sleep function that can be interrupted by signals */
-void pgwin32_backend_usleep(long microsec) {
- if (WaitForSingleObject(pgwin32_signal_event, (microsec < 500 ? 1 : (microsec + 500) / 1000)) == WAIT_OBJECT_0) {
+void
+pgwin32_backend_usleep(long microsec)
+{
+ if (WaitForSingleObject(pgwin32_signal_event, (microsec < 500 ? 1 : (microsec + 500) / 1000)) == WAIT_OBJECT_0)
+ {
pgwin32_dispatch_queued_signals();
errno = EINTR;
return;
/* Create the global event handle used to flag signals */
pgwin32_signal_event = CreateEvent(NULL, TRUE, FALSE, NULL);
- if (pgwin32_signal_event == NULL)
+ if (pgwin32_signal_event == NULL)
ereport(FATAL,
- (errmsg_internal("failed to create signal event: %d", (int)GetLastError())));
+ (errmsg_internal("failed to create signal event: %d", (int) GetLastError())));
/* Create thread for handling signals */
signal_thread_handle = CreateThread(NULL, 0, pg_signal_thread, NULL, 0, NULL);
if (signal_thread_handle == NULL)
ereport(FATAL,
- (errmsg_internal("failed to create signal handler thread")));
+ (errmsg_internal("failed to create signal handler thread")));
/* Create console control handle to pick up Ctrl-C etc */
- if (!SetConsoleCtrlHandler(pg_console_handler, TRUE))
+ if (!SetConsoleCtrlHandler(pg_console_handler, TRUE))
ereport(FATAL,
- (errmsg_internal("failed to set console control handler")));
+ (errmsg_internal("failed to set console control handler")));
}
}
-/* Console control handler will execute on a thread created
+/* Console control handler will execute on a thread created
by the OS at the time of invocation */
-static BOOL WINAPI pg_console_handler(DWORD dwCtrlType) {
+static BOOL WINAPI
+pg_console_handler(DWORD dwCtrlType)
+{
if (dwCtrlType == CTRL_C_EVENT ||
dwCtrlType == CTRL_BREAK_EVENT ||
dwCtrlType == CTRL_CLOSE_EVENT ||
- dwCtrlType == CTRL_SHUTDOWN_EVENT) {
+ dwCtrlType == CTRL_SHUTDOWN_EVENT)
+ {
pg_queue_signal(SIGINT);
return TRUE;
}
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.3 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/socket.c,v 1.4 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
/*
- * Convert the last socket error code into errno
+ * Convert the last socket error code into errno
*/
-static void TranslateSocketError(void) {
- switch (WSAGetLastError()) {
- case WSANOTINITIALISED:
- case WSAENETDOWN:
- case WSAEINPROGRESS:
- case WSAEINVAL:
- case WSAESOCKTNOSUPPORT:
- case WSAEFAULT:
- case WSAEINVALIDPROVIDER:
- case WSAEINVALIDPROCTABLE:
- case WSAEMSGSIZE:
+static void
+TranslateSocketError(void)
+{
+ switch (WSAGetLastError())
+ {
+ case WSANOTINITIALISED:
+ case WSAENETDOWN:
+ case WSAEINPROGRESS:
+ case WSAEINVAL:
+ case WSAESOCKTNOSUPPORT:
+ case WSAEFAULT:
+ case WSAEINVALIDPROVIDER:
+ case WSAEINVALIDPROCTABLE:
+ case WSAEMSGSIZE:
errno = EINVAL;
break;
case WSAEAFNOSUPPORT:
case WSAESHUTDOWN:
case WSAECONNABORTED:
case WSAEDISCON:
- errno = ECONNREFUSED; /*ENOTCONN?*/
+ errno = ECONNREFUSED; /* ENOTCONN? */
break;
default:
ereport(NOTICE,
- (errmsg_internal("Unknown win32 socket error code: %i",WSAGetLastError())));
+ (errmsg_internal("Unknown win32 socket error code: %i", WSAGetLastError())));
errno = EINVAL;
}
}
-static int pgwin32_poll_signals(void) {
- if (WaitForSingleObject(pgwin32_signal_event,0) == WAIT_OBJECT_0) {
+static int
+pgwin32_poll_signals(void)
+{
+ if (WaitForSingleObject(pgwin32_signal_event, 0) == WAIT_OBJECT_0)
+ {
pgwin32_dispatch_queued_signals();
errno = EINTR;
return 1;
return 0;
}
-static int pgwin32_waitforsinglesocket(SOCKET s, int what) {
+static int
+pgwin32_waitforsinglesocket(SOCKET s, int what)
+{
static HANDLE waitevent = INVALID_HANDLE_VALUE;
- HANDLE events[2];
- int r;
+ HANDLE events[2];
+ int r;
- if (waitevent == INVALID_HANDLE_VALUE) {
+ if (waitevent == INVALID_HANDLE_VALUE)
+ {
waitevent = CreateEvent(NULL, TRUE, FALSE, NULL);
if (waitevent == INVALID_HANDLE_VALUE)
ereport(ERROR,
- (errmsg_internal("Failed to create socket waiting event: %i",(int)GetLastError())));
+ (errmsg_internal("Failed to create socket waiting event: %i", (int) GetLastError())));
}
- else
- if (!ResetEvent(waitevent))
- ereport(ERROR,
- (errmsg_internal("Failed to reset socket waiting event: %i",(int)GetLastError())));
+ else if (!ResetEvent(waitevent))
+ ereport(ERROR,
+ (errmsg_internal("Failed to reset socket waiting event: %i", (int) GetLastError())));
+
-
- if (WSAEventSelect(s, waitevent, what) == SOCKET_ERROR) {
+ if (WSAEventSelect(s, waitevent, what) == SOCKET_ERROR)
+ {
TranslateSocketError();
return 0;
}
events[1] = waitevent;
r = WaitForMultipleObjects(2, events, FALSE, INFINITE);
- if (r == WAIT_OBJECT_0) {
+ if (r == WAIT_OBJECT_0)
+ {
pgwin32_dispatch_queued_signals();
errno = EINTR;
return 0;
}
- if (r == WAIT_OBJECT_0+1)
+ if (r == WAIT_OBJECT_0 + 1)
return 1;
ereport(ERROR,
- (errmsg_internal("Bad return from WaitForMultipleObjects: %i (%i)",r,(int)GetLastError())));
+ (errmsg_internal("Bad return from WaitForMultipleObjects: %i (%i)", r, (int) GetLastError())));
return 0;
}
/*
- * Create a socket, setting it to overlapped and non-blocking
+ * Create a socket, setting it to overlapped and non-blocking
*/
-SOCKET pgwin32_socket(int af, int type, int protocol) {
- SOCKET s;
+SOCKET
+pgwin32_socket(int af, int type, int protocol)
+{
+ SOCKET s;
unsigned long on = 1;
s = WSASocket(af, type, protocol, NULL, 0, WSA_FLAG_OVERLAPPED);
- if (s == INVALID_SOCKET) {
+ if (s == INVALID_SOCKET)
+ {
TranslateSocketError();
return INVALID_SOCKET;
}
-
- if (ioctlsocket(s, FIONBIO, &on)) {
+
+ if (ioctlsocket(s, FIONBIO, &on))
+ {
TranslateSocketError();
return INVALID_SOCKET;
}
}
-SOCKET pgwin32_accept(SOCKET s, struct sockaddr* addr, int* addrlen) {
- SOCKET rs;
+SOCKET
+pgwin32_accept(SOCKET s, struct sockaddr * addr, int *addrlen)
+{
+ SOCKET rs;
- /* Poll for signals, but don't return with EINTR, since we don't
- handle that in pqcomm.c */
+ /*
+ * Poll for signals, but don't return with EINTR, since we don't
+ * handle that in pqcomm.c
+ */
pgwin32_poll_signals();
rs = WSAAccept(s, addr, addrlen, NULL, 0);
- if (rs == INVALID_SOCKET) {
+ if (rs == INVALID_SOCKET)
+ {
TranslateSocketError();
return INVALID_SOCKET;
}
/* No signal delivery during connect. */
-int pgwin32_connect(SOCKET s, const struct sockaddr *addr, int addrlen) {
- int r;
+int
+pgwin32_connect(SOCKET s, const struct sockaddr * addr, int addrlen)
+{
+ int r;
r = WSAConnect(s, addr, addrlen, NULL, NULL, NULL, NULL);
if (r == 0)
return 0;
- if (WSAGetLastError() != WSAEWOULDBLOCK) {
+ if (WSAGetLastError() != WSAEWOULDBLOCK)
+ {
TranslateSocketError();
return -1;
}
- while (pgwin32_waitforsinglesocket(s, FD_CONNECT) == 0) {
+ while (pgwin32_waitforsinglesocket(s, FD_CONNECT) == 0)
+ {
/* Loop endlessly as long as we are just delivering signals */
}
return 0;
}
-int pgwin32_recv(SOCKET s, char *buf, int len, int f) {
- WSABUF wbuf;
- int r;
- DWORD b;
- DWORD flags = f;
+int
+pgwin32_recv(SOCKET s, char *buf, int len, int f)
+{
+ WSABUF wbuf;
+ int r;
+ DWORD b;
+ DWORD flags = f;
if (pgwin32_poll_signals())
return -1;
wbuf.len = len;
wbuf.buf = buf;
- r = WSARecv(s, &wbuf, 1, &b, &flags, NULL, NULL);
- if (r != SOCKET_ERROR && b > 0)
+ r = WSARecv(s, &wbuf, 1, &b, &flags, NULL, NULL);
+ if (r != SOCKET_ERROR && b > 0)
/* Read succeeded right away */
- return b;
+ return b;
if (r == SOCKET_ERROR &&
- WSAGetLastError() != WSAEWOULDBLOCK) {
+ WSAGetLastError() != WSAEWOULDBLOCK)
+ {
TranslateSocketError();
return -1;
}
/* No error, zero bytes (win2000+) or error+WSAEWOULDBLOCK (<=nt4) */
- if (pgwin32_waitforsinglesocket(s, FD_READ | FD_CLOSE | FD_ACCEPT) == 0)
+ if (pgwin32_waitforsinglesocket(s, FD_READ | FD_CLOSE | FD_ACCEPT) == 0)
return -1;
r = WSARecv(s, &wbuf, 1, &b, &flags, NULL, NULL);
- if (r == SOCKET_ERROR) {
+ if (r == SOCKET_ERROR)
+ {
TranslateSocketError();
return -1;
}
return b;
}
-int pgwin32_send(SOCKET s, char *buf, int len, int flags) {
- WSABUF wbuf;
- int r;
- DWORD b;
+int
+pgwin32_send(SOCKET s, char *buf, int len, int flags)
+{
+ WSABUF wbuf;
+ int r;
+ DWORD b;
if (pgwin32_poll_signals())
return -1;
wbuf.buf = buf;
r = WSASend(s, &wbuf, 1, &b, flags, NULL, NULL);
- if (r != SOCKET_ERROR && b > 0)
+ if (r != SOCKET_ERROR && b > 0)
/* Write succeeded right away */
return b;
-
+
if (r == SOCKET_ERROR &&
- WSAGetLastError() != WSAEWOULDBLOCK) {
+ WSAGetLastError() != WSAEWOULDBLOCK)
+ {
TranslateSocketError();
return -1;
}
return -1;
r = WSASend(s, &wbuf, 1, &b, flags, NULL, NULL);
- if (r == SOCKET_ERROR) {
+ if (r == SOCKET_ERROR)
+ {
TranslateSocketError();
return -1;
}
/*
* Wait for activity on one or more sockets.
- * While waiting, allow signals to run
+ * While waiting, allow signals to run
*
* NOTE! Currently does not implement exceptfds check,
* since it is not used in postgresql!
*/
-int pgwin32_select(int nfds, fd_set* readfds, fd_set* writefds, fd_set* exceptfds, const struct timeval* timeout) {
- WSAEVENT events[FD_SETSIZE*2]; /* worst case is readfds totally different
- * from writefds, so 2*FD_SETSIZE sockets */
- SOCKET sockets[FD_SETSIZE*2];
- int numevents=0;
- int i;
- int r;
- DWORD timeoutval = WSA_INFINITE;
- FD_SET outreadfds;
- FD_SET outwritefds;
- int nummatches = 0;
+int
+pgwin32_select(int nfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timeval * timeout)
+{
+ WSAEVENT events[FD_SETSIZE * 2]; /* worst case is readfds totally
+ * different from writefds, so
+ * 2*FD_SETSIZE sockets */
+ SOCKET sockets[FD_SETSIZE * 2];
+ int numevents = 0;
+ int i;
+ int r;
+ DWORD timeoutval = WSA_INFINITE;
+ FD_SET outreadfds;
+ FD_SET outwritefds;
+ int nummatches = 0;
Assert(exceptfds == NULL);
FD_ZERO(&outreadfds);
FD_ZERO(&outwritefds);
- /* Write FDs are different in the way that it is only flagged by
- * WSASelectEvent() if we have tried to write to them first. So try
- * an empty write */
- if (writefds) {
- for (i = 0; i < writefds->fd_count; i++) {
- char c;
- WSABUF buf;
- DWORD sent;
+ /*
+ * Write FDs are different in the way that it is only flagged by
+ * WSASelectEvent() if we have tried to write to them first. So try an
+ * empty write
+ */
+ if (writefds)
+ {
+ for (i = 0; i < writefds->fd_count; i++)
+ {
+ char c;
+ WSABUF buf;
+ DWORD sent;
buf.buf = &c;
buf.len = 0;
r = WSASend(writefds->fd_array[i], &buf, 1, &sent, 0, NULL, NULL);
- if (r == 0) /* Completed - means things are fine! */
+ if (r == 0) /* Completed - means things are fine! */
FD_SET(writefds->fd_array[i], &outwritefds);
- else { /* Not completed */
+ else
+ { /* Not completed */
if (WSAGetLastError() != WSAEWOULDBLOCK)
- /* Not completed, and not just "would block", so
- * an error occured */
+
+ /*
+ * Not completed, and not just "would block", so an
+ * error occured
+ */
FD_SET(writefds->fd_array[i], &outwritefds);
}
}
- if (outwritefds.fd_count > 0) {
- memcpy(writefds,&outwritefds,sizeof(fd_set));
+ if (outwritefds.fd_count > 0)
+ {
+ memcpy(writefds, &outwritefds, sizeof(fd_set));
if (readfds)
FD_ZERO(readfds);
return outwritefds.fd_count;
}
}
-
+
/* Now set up for an actual select */
- if (timeout != NULL) {
+ if (timeout != NULL)
+ {
/* timeoutval is in milliseconds */
- timeoutval = timeout->tv_sec*1000 + timeout->tv_usec / 1000;
+ timeoutval = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
}
- if (readfds != NULL) {
- for (i=0; i < readfds->fd_count; i++) {
+ if (readfds != NULL)
+ {
+ for (i = 0; i < readfds->fd_count; i++)
+ {
events[numevents] = WSACreateEvent();
sockets[numevents] = readfds->fd_array[i];
numevents++;
}
}
- if (writefds != NULL) {
- for (i=0; i < writefds->fd_count; i++) {
+ if (writefds != NULL)
+ {
+ for (i = 0; i < writefds->fd_count; i++)
+ {
if (!readfds ||
- !FD_ISSET(writefds->fd_array[i], readfds)) {
+ !FD_ISSET(writefds->fd_array[i], readfds))
+ {
/* If the socket is not in the read list */
events[numevents] = WSACreateEvent();
sockets[numevents] = writefds->fd_array[i];
}
}
- for (i=0; i < numevents; i++) {
- int flags = 0;
+ for (i = 0; i < numevents; i++)
+ {
+ int flags = 0;
- if (readfds && FD_ISSET(sockets[i],readfds))
+ if (readfds && FD_ISSET(sockets[i], readfds))
flags |= FD_READ | FD_ACCEPT | FD_CLOSE;
- if (writefds && FD_ISSET(sockets[i],writefds))
+ if (writefds && FD_ISSET(sockets[i], writefds))
flags |= FD_WRITE | FD_CLOSE;
-
- if (WSAEventSelect(sockets[i], events[i], flags) == SOCKET_ERROR) {
+
+ if (WSAEventSelect(sockets[i], events[i], flags) == SOCKET_ERROR)
+ {
TranslateSocketError();
- for (i = 0; i < numevents; i++)
+ for (i = 0; i < numevents; i++)
WSACloseEvent(events[i]);
return -1;
}
}
events[numevents] = pgwin32_signal_event;
- r = WaitForMultipleObjectsEx(numevents+1, events, FALSE, timeoutval, FALSE);
- if (r != WSA_WAIT_TIMEOUT && r != (WAIT_OBJECT_0+numevents)) {
- /* We scan all events, even those not signalled, in case more
- * than one event has been tagged but Wait.. can only return one.
+ r = WaitForMultipleObjectsEx(numevents + 1, events, FALSE, timeoutval, FALSE);
+ if (r != WSA_WAIT_TIMEOUT && r != (WAIT_OBJECT_0 + numevents))
+ {
+ /*
+ * We scan all events, even those not signalled, in case more than
+ * one event has been tagged but Wait.. can only return one.
*/
WSANETWORKEVENTS resEvents;
-
- for (i=0; i < numevents; i++) {
- ZeroMemory(&resEvents,sizeof(resEvents));
- if (WSAEnumNetworkEvents(sockets[i],events[i],&resEvents) == SOCKET_ERROR)
+
+ for (i = 0; i < numevents; i++)
+ {
+ ZeroMemory(&resEvents, sizeof(resEvents));
+ if (WSAEnumNetworkEvents(sockets[i], events[i], &resEvents) == SOCKET_ERROR)
ereport(FATAL,
- (errmsg_internal("failed to enumerate network events: %i",(int)GetLastError())));
+ (errmsg_internal("failed to enumerate network events: %i", (int) GetLastError())));
/* Read activity? */
- if (readfds && FD_ISSET(sockets[i], readfds)) {
+ if (readfds && FD_ISSET(sockets[i], readfds))
+ {
if ((resEvents.lNetworkEvents & FD_READ) ||
(resEvents.lNetworkEvents & FD_ACCEPT) ||
- (resEvents.lNetworkEvents & FD_CLOSE)) {
- FD_SET(sockets[i],&outreadfds);
+ (resEvents.lNetworkEvents & FD_CLOSE))
+ {
+ FD_SET(sockets[i], &outreadfds);
nummatches++;
}
}
/* Write activity? */
- if (writefds && FD_ISSET(sockets[i], writefds)) {
+ if (writefds && FD_ISSET(sockets[i], writefds))
+ {
if ((resEvents.lNetworkEvents & FD_WRITE) ||
- (resEvents.lNetworkEvents & FD_CLOSE)) {
- FD_SET(sockets[i],&outwritefds);
+ (resEvents.lNetworkEvents & FD_CLOSE))
+ {
+ FD_SET(sockets[i], &outwritefds);
nummatches++;
}
}
}
}
-
+
/* Clean up all handles */
- for (i = 0; i < numevents; i++) {
+ for (i = 0; i < numevents; i++)
+ {
WSAEventSelect(sockets[i], events[i], 0);
WSACloseEvent(events[i]);
}
-
- if (r == WSA_WAIT_TIMEOUT) {
+
+ if (r == WSA_WAIT_TIMEOUT)
+ {
if (readfds)
FD_ZERO(readfds);
if (writefds)
return 0;
}
- if (r == WAIT_OBJECT_0+numevents) {
+ if (r == WAIT_OBJECT_0 + numevents)
+ {
pgwin32_dispatch_queued_signals();
errno = EINTR;
if (readfds)
}
-/*
+/*
* Return win32 error string, since strerror can't
- * handle winsock codes
+ * handle winsock codes
*/
static char wserrbuf[256];
const char *
{
static HANDLE handleDLL = INVALID_HANDLE_VALUE;
- if (handleDLL == INVALID_HANDLE_VALUE) {
+ if (handleDLL == INVALID_HANDLE_VALUE)
+ {
handleDLL = LoadLibraryEx("netmsg.dll", NULL, DONT_RESOLVE_DLL_REFERENCES | LOAD_LIBRARY_AS_DATAFILE);
- if (handleDLL == NULL)
+ if (handleDLL == NULL)
ereport(FATAL,
- (errmsg_internal("Failed to load netmsg.dll: %i",(int)GetLastError())));
+ (errmsg_internal("Failed to load netmsg.dll: %i", (int) GetLastError())));
}
ZeroMemory(&wserrbuf, sizeof(wserrbuf));
err,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
wserrbuf,
- sizeof(wserrbuf)-1,
- NULL) == 0) {
+ sizeof(wserrbuf) - 1,
+ NULL) == 0)
+ {
/* Failed to get id */
- sprintf(wserrbuf,"Unknown winsock error %i",err);
+ sprintf(wserrbuf, "Unknown winsock error %i", err);
}
return wserrbuf;
}
-
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.3 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/port/win32/timer.c,v 1.4 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static HANDLE timerHandle = INVALID_HANDLE_VALUE;
-static VOID CALLBACK timer_completion(LPVOID arg, DWORD timeLow, DWORD timeHigh) {
+static VOID CALLBACK
+timer_completion(LPVOID arg, DWORD timeLow, DWORD timeHigh)
+{
pg_queue_signal(SIGALRM);
}
/*
* Limitations of this implementation:
- *
+ *
* - Does not support setting ovalue
* - Does not support interval timer (value->it_interval)
* - Only supports ITIMER_REAL
*/
-int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue) {
+int
+setitimer(int which, const struct itimerval * value, struct itimerval * ovalue)
+{
LARGE_INTEGER dueTime;
Assert(ovalue == NULL);
Assert(value->it_interval.tv_sec == 0 && value->it_interval.tv_usec == 0);
Assert(which == ITIMER_REAL);
- if (timerHandle == INVALID_HANDLE_VALUE) {
+ if (timerHandle == INVALID_HANDLE_VALUE)
+ {
/* First call in this backend, create new timer object */
timerHandle = CreateWaitableTimer(NULL, TRUE, NULL);
if (timerHandle == NULL)
ereport(FATAL,
- (errmsg_internal("failed to create waitable timer: %i",(int)GetLastError())));
+ (errmsg_internal("failed to create waitable timer: %i", (int) GetLastError())));
}
if (value->it_value.tv_sec == 0 &&
- value->it_value.tv_usec == 0) {
+ value->it_value.tv_usec == 0)
+ {
/* Turn timer off */
CancelWaitableTimer(timerHandle);
return 0;
}
/* Negative time to SetWaitableTimer means relative time */
- dueTime.QuadPart = -(value->it_value.tv_usec*10 + value->it_value.tv_sec*10000000L);
+ dueTime.QuadPart = -(value->it_value.tv_usec * 10 + value->it_value.tv_sec * 10000000L);
/* Turn timer on, or change timer */
if (!SetWaitableTimer(timerHandle, &dueTime, 0, timer_completion, NULL, FALSE))
ereport(FATAL,
- (errmsg_internal("failed to set waitable timer: %i",(int)GetLastError())));
+ (errmsg_internal("failed to set waitable timer: %i", (int) GetLastError())));
return 0;
}
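
A worked example of the due-time computation above may help: SetWaitableTimer expects 100-nanosecond units, and a negative value means "relative to now", hence the *10 and *10000000L factors. The helper below is only a sketch of that arithmetic, not part of the patch.

#include <windows.h>

/* Sketch: convert a (sec, usec) interval into a relative SetWaitableTimer due time. */
static LARGE_INTEGER
relative_due_time(long sec, long usec)
{
	LARGE_INTEGER dueTime;

	/* 1 us = 10 x 100 ns; e.g. 2 s + 500000 us -> -25000000 (2.5 s, relative) */
	dueTime.QuadPart = -(usec * 10 + sec * 10000000L);
	return dueTime;
}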
* to keep regular backends from having to write out dirty shared buffers
* (which they would only do when needing to free a shared buffer to read in
* another page). In the best scenario all writes from shared buffers will
- * be issued by the background writer process. However, regular backends are
+ * be issued by the background writer process. However, regular backends are
* still empowered to issue writes if the bgwriter fails to maintain enough
* clean shared buffers.
*
- * The bgwriter is also charged with handling all checkpoints. It will
+ * The bgwriter is also charged with handling all checkpoints. It will
* automatically dispatch a checkpoint after a certain amount of time has
* elapsed since the last one, and it can be signaled to perform requested
* checkpoints as well. (The GUC parameter that mandates a checkpoint every
* The bgwriter is started by the postmaster as soon as the startup subprocess
* finishes. It remains alive until the postmaster commands it to terminate.
* Normal termination is by SIGUSR2, which instructs the bgwriter to execute
- * a shutdown checkpoint and then exit(0). (All backends must be stopped
+ * a shutdown checkpoint and then exit(0). (All backends must be stopped
* before SIGUSR2 is issued!) Emergency termination is by SIGQUIT; like any
* backend, the bgwriter will simply abort and exit on SIGQUIT.
*
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.7 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/bgwriter.c,v 1.8 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct
{
- RelFileNode rnode;
- BlockNumber segno;
+ RelFileNode rnode;
+ BlockNumber segno;
/* might add a request-type field later */
} BgWriterRequest;
typedef struct
{
- pid_t bgwriter_pid; /* PID of bgwriter (0 if not started) */
+ pid_t bgwriter_pid; /* PID of bgwriter (0 if not started) */
- sig_atomic_t ckpt_started; /* advances when checkpoint starts */
- sig_atomic_t ckpt_done; /* advances when checkpoint done */
- sig_atomic_t ckpt_failed; /* advances when checkpoint fails */
+ sig_atomic_t ckpt_started; /* advances when checkpoint starts */
+ sig_atomic_t ckpt_done; /* advances when checkpoint done */
+ sig_atomic_t ckpt_failed; /* advances when checkpoint fails */
- int num_requests; /* current # of requests */
- int max_requests; /* allocated array size */
- BgWriterRequest requests[1]; /* VARIABLE LENGTH ARRAY */
+ int num_requests; /* current # of requests */
+ int max_requests; /* allocated array size */
+ BgWriterRequest requests[1]; /* VARIABLE LENGTH ARRAY */
} BgWriterShmemStruct;
static BgWriterShmemStruct *BgWriterShmem;
/*
* Private state
*/
-static bool am_bg_writer = false;
+static bool am_bg_writer = false;
-static bool ckpt_active = false;
+static bool ckpt_active = false;
-static time_t last_checkpoint_time;
+static time_t last_checkpoint_time;
static void bg_quickdie(SIGNAL_ARGS);
* Properly accept or ignore signals the postmaster might send us
*
* Note: we deliberately ignore SIGTERM, because during a standard Unix
- * system shutdown cycle, init will SIGTERM all processes at once. We
- * want to wait for the backends to exit, whereupon the postmaster will
- * tell us it's okay to shut down (via SIGUSR2).
+ * system shutdown cycle, init will SIGTERM all processes at once. We
+ * want to wait for the backends to exit, whereupon the postmaster
+ * will tell us it's okay to shut down (via SIGUSR2).
*
* SIGUSR1 is presently unused; keep it spare in case someday we want
* this process to participate in sinval messaging.
*/
pqsignal(SIGHUP, BgSigHupHandler); /* set flag to read config file */
pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
- pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
+ pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
pqsignal(SIGQUIT, bg_quickdie); /* hard crash time */
pqsignal(SIGALRM, SIG_IGN);
pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, SIG_IGN); /* reserve for sinval */
+ pqsignal(SIGUSR1, SIG_IGN); /* reserve for sinval */
pqsignal(SIGUSR2, ReqShutdownHandler); /* request shutdown */
/*
#endif
/*
- * Initialize so that first time-driven checkpoint happens
- * at the correct time.
+ * Initialize so that first time-driven checkpoint happens at the
+ * correct time.
*/
last_checkpoint_time = time(NULL);
/*
* These operations are really just a minimal subset of
- * AbortTransaction(). We don't have very many resources
- * to worry about in bgwriter, but we do have LWLocks and buffers.
+ * AbortTransaction(). We don't have very many resources to worry
+ * about in bgwriter, but we do have LWLocks and buffers.
*/
LWLockReleaseAll();
AbortBufferIO();
PG_SETMASK(&UnBlockSig);
/*
- * Loop forever
+ * Loop forever
*/
for (;;)
{
}
/*
- * Do an unforced checkpoint if too much time has elapsed
- * since the last one.
+ * Do an unforced checkpoint if too much time has elapsed since
+ * the last one.
*/
now = time(NULL);
elapsed_secs = now - last_checkpoint_time;
/*
* Ideally we should only warn if this checkpoint was
* requested due to running out of segment files, and not
- * if it was manually requested. However we can't tell the
- * difference with the current signalling mechanism.
+ * if it was manually requested. However we can't tell
+ * the difference with the current signalling mechanism.
*/
if (elapsed_secs < CheckPointWarning)
ereport(LOG,
/*
* Note we record the checkpoint start time not end time as
- * last_checkpoint_time. This is so that time-driven checkpoints
- * happen at a predictable spacing.
+ * last_checkpoint_time. This is so that time-driven
+ * checkpoints happen at a predictable spacing.
*/
last_checkpoint_time = now;
/*
- * After any checkpoint, close all smgr files. This is so we
- * won't hang onto smgr references to deleted files indefinitely.
- * (It is safe to do this because this process does not have a
- * relcache, and so no dangling references could remain.)
+ * After any checkpoint, close all smgr files. This is so we
+ * won't hang onto smgr references to deleted files
+ * indefinitely. (It is safe to do this because this process
+ * does not have a relcache, and so no dangling references
+ * could remain.)
*/
smgrcloseall();
n = 1;
}
else
- {
n = BufferSync(BgWriterPercent, BgWriterMaxPages);
- }
/*
- * Nap for the configured time or sleep for 10 seconds if
- * there was nothing to do at all.
+ * Nap for the configured time or sleep for 10 seconds if there
+ * was nothing to do at all.
*
* On some platforms, signals won't interrupt the sleep. To ensure
- * we respond reasonably promptly when someone signals us,
- * break down the sleep into 1-second increments, and check for
+ * we respond reasonably promptly when someone signals us, break
+ * down the sleep into 1-second increments, and check for
* interrupts after each nap.
*
* We absorb pending requests after each short sleep.
* equal to NBuffers. This may prove too large or small ...
*/
return MAXALIGN(sizeof(BgWriterShmemStruct) +
- (NBuffers - 1) * sizeof(BgWriterRequest));
+ (NBuffers - 1) *sizeof(BgWriterRequest));
}
/*
void
BgWriterShmemInit(void)
{
- bool found;
+ bool found;
BgWriterShmem = (BgWriterShmemStruct *)
ShmemInitStruct("Background Writer Data",
{
/* use volatile pointer to prevent code rearrangement */
volatile BgWriterShmemStruct *bgs = BgWriterShmem;
- sig_atomic_t old_failed = bgs->ckpt_failed;
- sig_atomic_t old_started = bgs->ckpt_started;
+ sig_atomic_t old_failed = bgs->ckpt_failed;
+ sig_atomic_t old_started = bgs->ckpt_started;
/*
- * Send signal to request checkpoint. When waitforit is false,
- * we consider failure to send the signal to be nonfatal.
+ * Send signal to request checkpoint. When waitforit is false, we
+ * consider failure to send the signal to be nonfatal.
*/
if (BgWriterShmem->bgwriter_pid == 0)
elog(waitforit ? ERROR : LOG,
pg_usleep(100000L);
}
old_started = bgs->ckpt_started;
+
/*
- * We are waiting for ckpt_done >= old_started, in a modulo
- * sense. This is a little tricky since we don't know the
- * width or signedness of sig_atomic_t. We make the lowest
- * common denominator assumption that it is only as wide
- * as "char". This means that this algorithm will cope
- * correctly as long as we don't sleep for more than 127
- * completed checkpoints. (If we do, we will get another
- * chance to exit after 128 more checkpoints...)
+ * We are waiting for ckpt_done >= old_started, in a modulo sense.
+ * This is a little tricky since we don't know the width or
+ * signedness of sig_atomic_t. We make the lowest common
+ * denominator assumption that it is only as wide as "char". This
+ * means that this algorithm will cope correctly as long as we
+ * don't sleep for more than 127 completed checkpoints. (If we
+ * do, we will get another chance to exit after 128 more
+ * checkpoints...)
*/
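/*
 * Illustrative note (not part of the patch): a concrete instance of the
 * comparison below, assuming sig_atomic_t behaves like an unsigned 8-bit
 * counter.  If old_started is 254 and ckpt_done has only reached 250,
 * (signed char) (250 - 254) is -4 and we keep waiting; once ckpt_done
 * wraps around to 2, (signed char) (2 - 254) is +4 and the loop ends,
 * even though 2 < 254 numerically.
 */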
while (((signed char) (bgs->ckpt_done - old_started)) < 0)
{
LWLockRelease(BgWriterCommLock);
for (request = requests; n > 0; request++, n--)
- {
RememberFsyncRequest(request->rnode, request->segno);
- }
if (requests)
pfree(requests);
}
*
* PostgreSQL WAL archiver
*
- * All functions relating to archiver are included here
+ * All functions relating to archiver are included here
*
- * - All functions executed by archiver process
+ * - All functions executed by archiver process
*
- * - archiver is forked from postmaster, and the two
- * processes then communicate using signals. All functions
- * executed by postmaster are included in this file.
+ * - archiver is forked from postmaster, and the two
+ * processes then communicate using signals. All functions
+ * executed by postmaster are included in this file.
*
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.8 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgarch.c,v 1.9 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
#define MIN_XFN_CHARS 16
#define MAX_XFN_CHARS 40
-#define VALID_XFN_CHARS "0123456789ABCDEF.history.backup"
+#define VALID_XFN_CHARS "0123456789ABCDEF.history.backup"
#define NUM_ARCHIVE_RETRIES 3
return 0;
/*
- * Do nothing if too soon since last archiver start. This is a
- * safety valve to protect against continuous respawn attempts if the
- * archiver is dying immediately at launch. Note that since we will
- * be re-called from the postmaster main loop, we will get another
- * chance later.
+ * Do nothing if too soon since last archiver start. This is a safety
+ * valve to protect against continuous respawn attempts if the
+ * archiver is dying immediately at launch. Note that since we will be
+ * re-called from the postmaster main loop, we will get another chance
+ * later.
*/
curtime = time(NULL);
if ((unsigned int) (curtime - last_pgarch_start_time) <
(unsigned int) PGARCH_RESTART_INTERVAL)
- return 0;
+ return 0;
last_pgarch_start_time = curtime;
fflush(stdout);
static pid_t
pgarch_forkexec(void)
{
- char *av[10];
- int ac = 0;
+ char *av[10];
+ int ac = 0;
av[ac++] = "postgres";
return postmaster_forkexec(ac, av);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
NON_EXEC_STATIC void
PgArchiverMain(int argc, char *argv[])
{
- IsUnderPostmaster = true; /* we are a postmaster subprocess now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess now */
- MyProcPid = getpid(); /* reset MyProcPid */
+ MyProcPid = getpid(); /* reset MyProcPid */
/* Lose the postmaster's on-exit routines */
on_exit_reset();
- /*
- * Ignore all signals usually bound to some action in the postmaster,
+ /*
+ * Ignore all signals usually bound to some action in the postmaster,
* except for SIGHUP, SIGUSR1 and SIGQUIT.
- */
- pqsignal(SIGHUP, ArchSigHupHandler);
- pqsignal(SIGINT, SIG_IGN);
- pqsignal(SIGTERM, SIG_IGN);
- pqsignal(SIGQUIT, pgarch_exit);
- pqsignal(SIGALRM, SIG_IGN);
- pqsignal(SIGPIPE, SIG_IGN);
- pqsignal(SIGUSR1, pgarch_waken);
- pqsignal(SIGUSR2, SIG_IGN);
- pqsignal(SIGCHLD, SIG_DFL);
- pqsignal(SIGTTIN, SIG_DFL);
- pqsignal(SIGTTOU, SIG_DFL);
- pqsignal(SIGCONT, SIG_DFL);
- pqsignal(SIGWINCH, SIG_DFL);
- PG_SETMASK(&UnBlockSig);
-
- /*
- * Identify myself via ps
- */
- init_ps_display("archiver process", "", "");
- set_ps_display("");
-
- /* Init XLOG file paths --- needed in EXEC_BACKEND case */
+ */
+ pqsignal(SIGHUP, ArchSigHupHandler);
+ pqsignal(SIGINT, SIG_IGN);
+ pqsignal(SIGTERM, SIG_IGN);
+ pqsignal(SIGQUIT, pgarch_exit);
+ pqsignal(SIGALRM, SIG_IGN);
+ pqsignal(SIGPIPE, SIG_IGN);
+ pqsignal(SIGUSR1, pgarch_waken);
+ pqsignal(SIGUSR2, SIG_IGN);
+ pqsignal(SIGCHLD, SIG_DFL);
+ pqsignal(SIGTTIN, SIG_DFL);
+ pqsignal(SIGTTOU, SIG_DFL);
+ pqsignal(SIGCONT, SIG_DFL);
+ pqsignal(SIGWINCH, SIG_DFL);
+ PG_SETMASK(&UnBlockSig);
+
+ /*
+ * Identify myself via ps
+ */
+ init_ps_display("archiver process", "", "");
+ set_ps_display("");
+
+ /* Init XLOG file paths --- needed in EXEC_BACKEND case */
XLOGPathInit();
- pgarch_MainLoop();
+ pgarch_MainLoop();
- exit(0);
+ exit(0);
}
/* SIGQUIT signal handler for archiver process */
{
/*
* For now, we just nail the doors shut and get out of town. It might
- * seem cleaner to finish up any pending archive copies, but there's
- * a nontrivial risk that init will kill us partway through.
+ * seem cleaner to finish up any pending archive copies, but there's a
+ * nontrivial risk that init will kill us partway through.
*/
- exit(0);
+ exit(0);
}
/* SIGHUP: set flag to re-read config file at next convenient time */
static void
pgarch_MainLoop(void)
{
- time_t last_copy_time = 0;
- time_t curtime;
+ time_t last_copy_time = 0;
+ time_t curtime;
/*
* We run the copy loop immediately upon entry, in case there are
*/
wakened = true;
- do {
+ do
+ {
/* Check for config update */
if (got_SIGHUP)
}
/*
- * There shouldn't be anything for the archiver to do except
- * to wait for a signal, so we could use pause(3) here...
- * ...however, the archiver exists to protect our data, so
- * she wakes up occasionally to allow herself to be proactive.
- * In particular this avoids getting stuck if a signal arrives
- * just before we enter sleep().
+ * There shouldn't be anything for the archiver to do except to
+ * wait for a signal, so we could use pause(3) here... ...however,
+ * the archiver exists to protect our data, so she wakes up
+ * occasionally to allow herself to be proactive. In particular
+ * this avoids getting stuck if a signal arrives just before we
+ * enter sleep().
*/
if (!wakened)
{
(unsigned int) PGARCH_AUTOWAKE_INTERVAL)
wakened = true;
}
- } while (PostmasterIsAlive(true));
+ } while (PostmasterIsAlive(true));
}
/*
static void
pgarch_ArchiverCopyLoop(void)
{
- char xlog[MAX_XFN_CHARS + 1];
-
- /*
- * loop through all xlogs with archive_status of .ready
- * and archive them...mostly we expect this to be a single
- * file, though it is possible some backend will add
- * files onto the list of those that need archiving while we
- * are still copying earlier archives
- */
- while (pgarch_readyXlog(xlog))
+ char xlog[MAX_XFN_CHARS + 1];
+
+ /*
+ * loop through all xlogs with archive_status of .ready and archive
+ * them...mostly we expect this to be a single file, though it is
+ * possible some backend will add files onto the list of those that
+ * need archiving while we are still copying earlier archives
+ */
+ while (pgarch_readyXlog(xlog))
{
- int failures = 0;
+ int failures = 0;
for (;;)
{
static bool
pgarch_archiveXlog(char *xlog)
{
- char xlogarchcmd[MAXPGPATH];
- char pathname[MAXPGPATH];
- char *dp;
- char *endp;
+ char xlogarchcmd[MAXPGPATH];
+ char pathname[MAXPGPATH];
+ char *dp;
+ char *endp;
const char *sp;
- int rc;
+ int rc;
- snprintf(pathname, MAXPGPATH, "%s/%s", XLogDir, xlog);
+ snprintf(pathname, MAXPGPATH, "%s/%s", XLogDir, xlog);
/*
* construct the command to be executed
case 'p':
/* %p: full path of source file */
sp++;
- StrNCpy(dp, pathname, endp-dp);
+ StrNCpy(dp, pathname, endp - dp);
make_native_path(dp);
dp += strlen(dp);
break;
case 'f':
/* %f: filename of source file */
sp++;
- StrNCpy(dp, xlog, endp-dp);
+ StrNCpy(dp, xlog, endp - dp);
dp += strlen(dp);
break;
case '%':
*dp = '\0';
ereport(DEBUG3,
- (errmsg_internal("executing archive command \"%s\"",
+ (errmsg_internal("executing archive command \"%s\"",
xlogarchcmd)));
- rc = system(xlogarchcmd);
- if (rc != 0) {
- ereport(LOG,
+ rc = system(xlogarchcmd);
+ if (rc != 0)
+ {
+ ereport(LOG,
(errmsg("archive command \"%s\" failed: return code %d",
xlogarchcmd, rc)));
- return false;
- }
+ return false;
+ }
ereport(LOG,
- (errmsg("archived transaction log file \"%s\"", xlog)));
+ (errmsg("archived transaction log file \"%s\"", xlog)));
- return true;
+ return true;
}
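/*
 * Illustrative note (not part of the patch): given the %p/%f substitutions
 * above, an archive_command along the lines of
 *
 *		archive_command = 'cp %p /mnt/server/archivedir/%f'
 *
 * is expanded here into something like
 *
 *		cp <XLogDir>/000000010000000000000042 /mnt/server/archivedir/000000010000000000000042
 *
 * before being handed to system().  The destination directory is only an
 * example; any command that stores the file and reports success through
 * its exit status will do.
 */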
/*
pgarch_readyXlog(char *xlog)
{
/*
- * open xlog status directory and read through list of
- * xlogs that have the .ready suffix, looking for earliest file.
- * It is possible to optimise this code, though only a single
- * file is expected on the vast majority of calls, so....
+ * open xlog status directory and read through list of xlogs that have
+ * the .ready suffix, looking for earliest file. It is possible to
+ * optimise this code, though only a single file is expected on the
+ * vast majority of calls, so....
*/
char XLogArchiveStatusDir[MAXPGPATH];
- char newxlog[MAX_XFN_CHARS + 6 + 1];
- DIR *rldir;
- struct dirent *rlde;
- bool found = false;
+ char newxlog[MAX_XFN_CHARS + 6 + 1];
+ DIR *rldir;
+ struct dirent *rlde;
+ bool found = false;
- snprintf(XLogArchiveStatusDir, MAXPGPATH, "%s/archive_status", XLogDir);
+ snprintf(XLogArchiveStatusDir, MAXPGPATH, "%s/archive_status", XLogDir);
rldir = AllocateDir(XLogArchiveStatusDir);
if (rldir == NULL)
ereport(ERROR,
- (errcode_for_file_access(),
+ (errcode_for_file_access(),
errmsg("could not open archive status directory \"%s\": %m",
XLogArchiveStatusDir)));
errno = 0;
while ((rlde = readdir(rldir)) != NULL)
{
- int basenamelen = (int) strlen(rlde->d_name) - 6;
+ int basenamelen = (int) strlen(rlde->d_name) - 6;
if (basenamelen >= MIN_XFN_CHARS &&
basenamelen <= MAX_XFN_CHARS &&
strspn(rlde->d_name, VALID_XFN_CHARS) >= basenamelen &&
strcmp(rlde->d_name + basenamelen, ".ready") == 0)
{
- if (!found) {
- strcpy(newxlog, rlde->d_name);
- found = true;
- } else {
- if (strcmp(rlde->d_name, newxlog) < 0)
- strcpy(newxlog, rlde->d_name);
- }
+ if (!found)
+ {
+ strcpy(newxlog, rlde->d_name);
+ found = true;
+ }
+ else
+ {
+ if (strcmp(rlde->d_name, newxlog) < 0)
+ strcpy(newxlog, rlde->d_name);
+ }
}
errno = 0;
}
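/*
 * Illustrative note (not part of the patch): the loop above simply keeps
 * the smallest qualifying name in strcmp() order, so if both
 * 000000010000000000000002.ready and 000000010000000000000001.ready are
 * present, newxlog ends up holding the latter and the oldest segment is
 * archived first.
 */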
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
if (errno)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read archive status directory \"%s\": %m",
- XLogArchiveStatusDir)));
+ errmsg("could not read archive status directory \"%s\": %m",
+ XLogArchiveStatusDir)));
FreeDir(rldir);
if (found)
static void
pgarch_archiveDone(char *xlog)
{
- char rlogready[MAXPGPATH];
- char rlogdone[MAXPGPATH];
+ char rlogready[MAXPGPATH];
+ char rlogdone[MAXPGPATH];
StatusFilePath(rlogready, xlog, ".ready");
StatusFilePath(rlogdone, xlog, ".done");
- if (rename(rlogready, rlogdone) < 0)
- ereport(WARNING,
+ if (rename(rlogready, rlogdone) < 0)
+ ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not rename file \"%s\" to \"%s\": %m",
rlogready, rlogdone)));
*
* Copyright (c) 2001-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.79 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/pgstat.c,v 1.80 2004/08/29 05:06:46 momjian Exp $
* ----------
*/
#include "postgres.h"
#define PGSTAT_DESTROY_DELAY 10000 /* How long to keep destroyed
* objects known, to give delayed
- * UDP packets time to arrive;
- * in milliseconds. */
+ * UDP packets time to arrive; in
+ * milliseconds. */
#define PGSTAT_DESTROY_COUNT (PGSTAT_DESTROY_DELAY / PGSTAT_STAT_INTERVAL)
* Local data
* ----------
*/
-NON_EXEC_STATIC int pgStatSock = -1;
+NON_EXEC_STATIC int pgStatSock = -1;
static int pgStatPipe[2];
static struct sockaddr_storage pgStatAddr;
{
STAT_PROC_BUFFER,
STAT_PROC_COLLECTOR
-} STATS_PROCESS_TYPE;
+} STATS_PROCESS_TYPE;
static pid_t pgstat_forkexec(STATS_PROCESS_TYPE procType);
static void pgstat_parseArgs(int argc, char *argv[]);
-
#endif
NON_EXEC_STATIC void PgstatBufferMain(int argc, char *argv[]);
*addr,
hints;
int ret;
- fd_set rset;
+ fd_set rset;
struct timeval tv;
- char test_byte;
- int sel_res;
+ char test_byte;
+ int sel_res;
#define TESTBYTEVAL ((char) 199)
pgstat_collect_startcollector = true;
/*
- * Initialize the filename for the status reports. (In the EXEC_BACKEND
- * case, this only sets the value in the postmaster. The collector
- * subprocess will recompute the value for itself, and individual
- * backends must do so also if they want to access the file.)
+ * Initialize the filename for the status reports. (In the
+ * EXEC_BACKEND case, this only sets the value in the postmaster. The
+ * collector subprocess will recompute the value for itself, and
+ * individual backends must do so also if they want to access the
+ * file.)
*/
snprintf(pgStat_fname, MAXPGPATH, PGSTAT_STAT_FILENAME, DataDir);
/*
* On some platforms, getaddrinfo_all() may return multiple addresses
- * only one of which will actually work (eg, both IPv6 and IPv4 addresses
- * when kernel will reject IPv6). Worse, the failure may occur at the
- * bind() or perhaps even connect() stage. So we must loop through the
- * results till we find a working combination. We will generate LOG
- * messages, but no error, for bogus combinations.
+ * only one of which will actually work (eg, both IPv6 and IPv4
+ * addresses when kernel will reject IPv6). Worse, the failure may
+ * occur at the bind() or perhaps even connect() stage. So we must
+ * loop through the results till we find a working combination. We
+ * will generate LOG messages, but no error, for bogus combinations.
*/
for (addr = addrs; addr; addr = addr->ai_next)
{
if (addr->ai_family == AF_UNIX)
continue;
#endif
+
/*
* Create the socket.
*/
}
/*
- * Bind it to a kernel assigned port on localhost and get the assigned
- * port via getsockname().
+ * Bind it to a kernel assigned port on localhost and get the
+ * assigned port via getsockname().
*/
if (bind(pgStatSock, addr->ai_addr, addr->ai_addrlen) < 0)
{
}
alen = sizeof(pgStatAddr);
- if (getsockname(pgStatSock, (struct sockaddr *) &pgStatAddr, &alen) < 0)
+ if (getsockname(pgStatSock, (struct sockaddr *) & pgStatAddr, &alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
}
/*
- * Connect the socket to its own address. This saves a few cycles by
- * not having to respecify the target address on every send. This also
- * provides a kernel-level check that only packets from this same
- * address will be received.
+ * Connect the socket to its own address. This saves a few cycles
+ * by not having to respecify the target address on every send.
+ * This also provides a kernel-level check that only packets from
+ * this same address will be received.
*/
- if (connect(pgStatSock, (struct sockaddr *) &pgStatAddr, alen) < 0)
+ if (connect(pgStatSock, (struct sockaddr *) & pgStatAddr, alen) < 0)
{
ereport(LOG,
(errcode_for_socket_access(),
/*
* Try to send and receive a one-byte test message on the socket.
* This is to catch situations where the socket can be created but
- * will not actually pass data (for instance, because kernel packet
- * filtering rules prevent it).
+ * will not actually pass data (for instance, because kernel
+ * packet filtering rules prevent it).
*/
test_byte = TESTBYTEVAL;
if (send(pgStatSock, &test_byte, 1, 0) != 1)
}
/*
- * There could possibly be a little delay before the message can be
- * received. We arbitrarily allow up to half a second before deciding
- * it's broken.
+ * There could possibly be a little delay before the message can
+ * be received. We arbitrarily allow up to half a second before
+ * deciding it's broken.
*/
for (;;) /* need a loop to handle EINTR */
{
FD_SET(pgStatSock, &rset);
tv.tv_sec = 0;
tv.tv_usec = 500000;
- sel_res = select(pgStatSock+1, &rset, NULL, NULL, &tv);
+ sel_res = select(pgStatSock + 1, &rset, NULL, NULL, &tv);
if (sel_res >= 0 || errno != EINTR)
break;
}
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
closesocket(pgStatSock);
pgStatSock = -1;
continue;
if (sel_res == 0 || !FD_ISSET(pgStatSock, &rset))
{
/*
- * This is the case we actually think is likely, so take pains to
- * give a specific message for it.
+ * This is the case we actually think is likely, so take pains
+ * to give a specific message for it.
*
* errno will not be set meaningfully here, so don't use it.
*/
continue;
}
- if (test_byte != TESTBYTEVAL) /* strictly paranoia ... */
+ if (test_byte != TESTBYTEVAL) /* strictly paranoia ... */
{
ereport(LOG,
(ERRCODE_INTERNAL_ERROR,
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not set statistics collector socket to nonblocking mode: %m")));
+ errmsg("could not set statistics collector socket to nonblocking mode: %m")));
goto startup_failed;
}
static pid_t
pgstat_forkexec(STATS_PROCESS_TYPE procType)
{
- char *av[10];
- int ac = 0, bufc = 0, i;
- char pgstatBuf[2][32];
+ char *av[10];
+ int ac = 0,
+ bufc = 0,
+ i;
+ char pgstatBuf[2][32];
av[ac++] = "postgres";
av[ac++] = postgres_exec_path;
/* Pipe file ids (those not passed by write_backend_variables) */
- snprintf(pgstatBuf[bufc++],32,"%d",pgStatPipe[0]);
- snprintf(pgstatBuf[bufc++],32,"%d",pgStatPipe[1]);
+ snprintf(pgstatBuf[bufc++], 32, "%d", pgStatPipe[0]);
+ snprintf(pgstatBuf[bufc++], 32, "%d", pgStatPipe[1]);
/* Add to the arg list */
Assert(bufc <= lengthof(pgstatBuf));
Assert(argc == 6);
argc = 3;
- StrNCpy(postgres_exec_path, argv[argc++], MAXPGPATH);
- pgStatPipe[0] = atoi(argv[argc++]);
- pgStatPipe[1] = atoi(argv[argc++]);
+ StrNCpy(postgres_exec_path, argv[argc++], MAXPGPATH);
+ pgStatPipe[0] = atoi(argv[argc++]);
+ pgStatPipe[1] = atoi(argv[argc++]);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/* ----------
{
tsmsg = pgStatTabstatMessages[mb];
- for (i = tsmsg->m_nentries; --i >= 0; )
+ for (i = tsmsg->m_nentries; --i >= 0;)
{
if (tsmsg->m_entry[i].t_id == rel_id)
{
/* unblock will happen in pgstat_recvbuffer */
#ifdef EXEC_BACKEND
- pgstat_parseArgs(argc,argv);
+ pgstat_parseArgs(argc, argv);
#endif
/*
/*
* Reset signal handling. With the exception of restoring default
- * SIGCHLD and SIGQUIT handling, this is a no-op in the non-EXEC_BACKEND
- * case because we'll have inherited these settings from the buffer
- * process; but it's not a no-op for EXEC_BACKEND.
+ * SIGCHLD and SIGQUIT handling, this is a no-op in the
+ * non-EXEC_BACKEND case because we'll have inherited these settings
+ * from the buffer process; but it's not a no-op for EXEC_BACKEND.
*/
pqsignal(SIGHUP, SIG_IGN);
pqsignal(SIGINT, SIG_IGN);
PG_SETMASK(&UnBlockSig);
#ifdef EXEC_BACKEND
- pgstat_parseArgs(argc,argv);
+ pgstat_parseArgs(argc, argv);
#endif
/* Close unwanted files */
/* assume the problem is out-of-memory */
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
{
ereport(LOG,
(errcode(ERRCODE_OUT_OF_MEMORY),
- errmsg("out of memory in statistics collector --- abort")));
+ errmsg("out of memory in statistics collector --- abort")));
exit(1);
}
memset(pgStatBeTable, 0, sizeof(PgStat_StatBeEntry) * MaxBackends);
/*
* Now wait for something to do.
*/
- nready = select(readPipe+1, &rfds, NULL, NULL,
+ nready = select(readPipe + 1, &rfds, NULL, NULL,
(need_statwrite) ? &timeout : NULL);
if (nready < 0)
{
continue;
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in statistics collector: %m")));
+ errmsg("select() failed in statistics collector: %m")));
exit(1);
}
while (nread < targetlen)
{
len = piperead(readPipe, ((char *) &msg) + nread,
- targetlen - nread);
+ targetlen - nread);
if (len < 0)
{
if (errno == EINTR)
{
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not set statistics collector pipe to nonblocking mode: %m")));
+ errmsg("could not set statistics collector pipe to nonblocking mode: %m")));
exit(1);
}
continue;
/*
- * If the postmaster has terminated, we die too. (This is no longer
- * the normal exit path, however.)
+ * If the postmaster has terminated, we die too. (This is no
+ * longer the normal exit path, however.)
*/
if (!PostmasterIsAlive(true))
exit(0);
{
/*
* For now, we just nail the doors shut and get out of town. It might
- * be cleaner to allow any pending messages to be sent, but that creates
- * a tradeoff against speed of exit.
+ * be cleaner to allow any pending messages to be sent, but that
+ * creates a tradeoff against speed of exit.
*/
exit(0);
}
if (msg->m_backendid < 1 || msg->m_backendid > MaxBackends)
{
ereport(LOG,
- (errmsg("invalid server process ID %d", msg->m_backendid)));
+ (errmsg("invalid server process ID %d", msg->m_backendid)));
return -1;
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not open temporary statistics file \"%s\": %m",
- pgStat_tmpfname)));
+ errmsg("could not open temporary statistics file \"%s\": %m",
+ pgStat_tmpfname)));
return;
}
{
ereport(LOG,
(errcode_for_file_access(),
- errmsg("could not close temporary statistics file \"%s\": %m",
- pgStat_tmpfname)));
+ errmsg("could not close temporary statistics file \"%s\": %m",
+ pgStat_tmpfname)));
}
else
{
HASH_REMOVE, NULL) == NULL)
{
ereport(LOG,
- (errmsg("dead-server-process hash table corrupted "
- "during cleanup --- abort")));
+ (errmsg("dead-server-process hash table corrupted "
+ "during cleanup --- abort")));
exit(1);
}
}
static void
backend_read_statsfile(void)
{
- TransactionId topXid = GetTopTransactionId();
+ TransactionId topXid = GetTopTransactionId();
if (!TransactionIdEquals(pgStatDBHashXact, topXid))
{
* message to setup a backend process.
*
* The postmaster also manages system-wide operations such as
- * startup and shutdown. The postmaster itself doesn't do those
+ * startup and shutdown. The postmaster itself doesn't do those
* operations, mind you --- it just forks off a subprocess to do them
* at the right times. It also takes care of resetting the system
* if a backend crashes.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.423 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/postmaster.c,v 1.424 2004/08/29 05:06:46 momjian Exp $
*
* NOTES
*
* Error Reporting:
* Use write_stderr() only for reporting "interactive" errors
* (essentially, bogus arguments on the command line). Once the
- * postmaster is launched, use ereport(). In particular, don't use
+ * postmaster is launched, use ereport(). In particular, don't use
* write_stderr() for anything that occurs after pmdaemonize.
*
*-------------------------------------------------------------------------
*/
static void checkDataDir(const char *checkdir);
static bool onlyConfigSpecified(const char *checkdir);
+
#ifdef USE_RENDEZVOUS
static void reg_reply(DNSServiceRegistrationReplyErrorType errorCode,
- void *context);
+ void *context);
#endif
static void pmdaemonize(void);
static Port *ConnCreate(int serverFd);
static HANDLE *win32_childHNDArray;
static unsigned long win32_numChildren = 0;
-HANDLE PostmasterHandle;
+HANDLE PostmasterHandle;
#endif
static pid_t backend_forkexec(Port *port);
static void ShmemBackendArrayAdd(Backend *bn);
static void ShmemBackendArrayRemove(pid_t pid);
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
#define StartupDataBase() StartChildProcess(BS_XLOG_STARTUP)
#define StartBackgroundWriter() StartChildProcess(BS_XLOG_BGWRITER)
InitializeGUCOptions();
userPGDATA = getenv("PGDATA"); /* default value */
-
+
opterr = 1;
while ((opt = getopt(argc, argv, "A:a:B:b:c:D:d:Fh:ik:lm:MN:no:p:Ss-:")) != -1)
case 'o':
/*
- * Other options to pass to the backend on the command line
+ * Other options to pass to the backend on the command
+ * line
*/
snprintf(ExtraOptions + strlen(ExtraOptions),
sizeof(ExtraOptions) - strlen(ExtraOptions),
if (onlyConfigSpecified(userPGDATA))
{
/*
- * It is either a file name or a directory with no
- * global/pg_control file, and hence not a data directory.
+ * It is either a file name or a directory with no
+ * global/pg_control file, and hence not a data directory.
*/
user_pgconfig = userPGDATA;
ProcessConfigFile(PGC_POSTMASTER);
- if (!guc_pgdata) /* Got a pgdata from the config file? */
+ if (!guc_pgdata) /* Got a pgdata from the config file? */
{
write_stderr("%s does not know where to find the database system data.\n"
- "This should be specified as \"pgdata\" in %s%s.\n",
+ "This should be specified as \"pgdata\" in %s%s.\n",
progname, userPGDATA,
user_pgconfig_is_dir ? "/postgresql.conf" : "");
ExitPostmaster(2);
}
else
{
- /* Now we can set the data directory, and then read postgresql.conf. */
+ /*
+ * Now we can set the data directory, and then read
+ * postgresql.conf.
+ */
checkDataDir(userPGDATA);
SetDataDir(userPGDATA);
ProcessConfigFile(PGC_POSTMASTER);
if (external_pidfile)
{
- FILE *fpidfile = fopen(external_pidfile, "w");
+ FILE *fpidfile = fopen(external_pidfile, "w");
if (fpidfile)
{
}
else
fprintf(stderr,
- gettext("%s could not write to external pid file %s\n"),
- progname, external_pidfile);
+ gettext("%s could not write to external pid file %s\n"),
+ progname, external_pidfile);
}
/* If timezone is not set, determine what the OS uses */
if (find_other_exec(argv[0], "postgres", PG_VERSIONSTR,
postgres_exec_path) < 0)
ereport(FATAL,
- (errmsg("%s: could not locate matching postgres executable",
- progname)));
+ (errmsg("%s: could not locate matching postgres executable",
+ progname)));
#endif
/*
* We want to do this before we try to grab the input sockets, because
* the data directory interlock is more reliable than the socket-file
* interlock (thanks to whoever decided to put socket files in /tmp
- * :-(). For the same reason, it's best to grab the TCP socket(s) before
- * the Unix socket.
+ * :-(). For the same reason, it's best to grab the TCP socket(s)
+ * before the Unix socket.
*/
CreateDataDirLockFile(DataDir, true);
if (ListenAddresses)
{
- char *rawstring;
- List *elemlist;
- ListCell *l;
+ char *rawstring;
+ List *elemlist;
+ ListCell *l;
/* Need a modifiable copy of ListenAddresses */
rawstring = pstrdup(ListenAddresses);
/* Parse string into list of identifiers */
- if (!SplitIdentifierString(rawstring, ',', &elemlist))
+ if (!SplitIdentifierString(rawstring, ',', &elemlist))
{
/* syntax error in list */
ereport(FATAL,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid list syntax for \"listen_addresses\"")));
+ errmsg("invalid list syntax for \"listen_addresses\"")));
}
foreach(l, elemlist)
{
- char *curhost = (char *) lfirst(l);
+ char *curhost = (char *) lfirst(l);
if (strcmp(curhost, "*") == 0)
status = StreamServerPort(AF_UNSPEC, NULL,
BackendList = DLNewList();
#ifdef WIN32
+
/*
* Initialize the child pid/HANDLE arrays for signal handling.
*/
TRUE,
DUPLICATE_SAME_ACCESS) == 0)
ereport(FATAL,
- (errmsg_internal("could not duplicate postmaster handle: %d",
- (int) GetLastError())));
+ (errmsg_internal("could not duplicate postmaster handle: %d",
+ (int) GetLastError())));
#endif
/*
/*
* Reset whereToSendOutput from Debug (its starting state) to None.
* This stops ereport from sending log messages to stderr unless
- * Log_destination permits. We don't do this until the postmaster
- * is fully launched, since startup failures may as well be
- * reported to stderr.
+ * Log_destination permits. We don't do this until the postmaster is
+ * fully launched, since startup failures may as well be reported to
+ * stderr.
*/
whereToSendOutput = None;
static bool
onlyConfigSpecified(const char *checkdir)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
struct stat stat_buf;
- if (checkdir == NULL) /* checkDataDir handles this */
+ if (checkdir == NULL) /* checkDataDir handles this */
return FALSE;
- if (stat(checkdir, &stat_buf) == -1) /* ditto */
+ if (stat(checkdir, &stat_buf) == -1) /* ditto */
return FALSE;
- if (S_ISREG(stat_buf.st_mode)) /* It's a regular file, so assume it's explict */
+ if (S_ISREG(stat_buf.st_mode)) /* It's a regular file, so assume
+ * it's explicit */
return TRUE;
- else if (S_ISDIR(stat_buf.st_mode)) /* It's a directory, is it a config or system dir? */
+ else if (S_ISDIR(stat_buf.st_mode)) /* It's a directory, is it a
+ * config or system dir? */
{
snprintf(path, MAXPGPATH, "%s/global/pg_control", checkdir);
/* If this is not found, it is a config-only directory */
{
}
-
-#endif /* USE_RENDEZVOUS */
+#endif /* USE_RENDEZVOUS */
/*
setitimer(ITIMER_PROF, &prof_itimer, NULL);
#endif
- MyProcPid = PostmasterPid = getpid(); /* reset PID vars to child */
+ MyProcPid = PostmasterPid = getpid(); /* reset PID vars to child */
/* GH: If there's no setsid(), we hopefully don't need silent mode.
* Until there's a better solution.
dup2(i, 1);
dup2(i, 2);
close(i);
-#else /* WIN32 */
+#else /* WIN32 */
/* not supported */
elog(FATAL, "SilentMode not supported under WIN32");
-#endif /* WIN32 */
+#endif /* WIN32 */
}
* Wait for something to happen.
*
* We wait at most one minute, to ensure that the other background
- * tasks handled below get done even when no requests are arriving.
+ * tasks handled below get done even when no requests are
+ * arriving.
*/
memcpy((char *) &rmask, (char *) &readmask, sizeof(fd_set));
if (selres > 0)
{
/*
- * Select a random seed at the time of first receiving a request.
+ * Select a random seed at the time of first receiving a
+ * request.
*/
while (random_seed == 0)
{
/*
* We are not sure how much precision is in tv_usec, so we
- * swap the nibbles of 'later' and XOR them with 'earlier'. On
- * the off chance that the result is 0, we loop until it isn't.
+ * swap the nibbles of 'later' and XOR them with
+ * 'earlier'. On the off chance that the result is 0, we
+ * loop until it isn't.
*/
random_seed = earlier.tv_usec ^
((later.tv_usec << 16) |
BackendStartup(port);
/*
- * We no longer need the open socket or port structure
- * in this process
+ * We no longer need the open socket or port
+ * structure in this process
*/
StreamClose(port->sock);
ConnFree(port);
SysLoggerPID = SysLogger_Start();
/*
- * If no background writer process is running, and we are not in
- * a state that prevents it, start one. It doesn't matter if this
+ * If no background writer process is running, and we are not in a
+ * state that prevents it, start one. It doesn't matter if this
* fails, we'll just try again later.
*/
if (BgWriterPID == 0 && StartupPID == 0 && !FatalError)
}
/* If we have lost the archiver, try to start a new one */
- if (XLogArchivingActive() && PgArchPID == 0 &&
- StartupPID == 0 && !FatalError && Shutdown == NoShutdown)
+ if (XLogArchivingActive() && PgArchPID == 0 &&
+ StartupPID == 0 && !FatalError && Shutdown == NoShutdown)
PgArchPID = pgarch_start();
-
+
/* If we have lost the stats collector, try to start a new one */
if (PgStatPID == 0 &&
StartupPID == 0 && !FatalError && Shutdown == NoShutdown)
PgStatPID = pgstat_start();
/*
- * Touch the socket and lock file at least every ten minutes, to ensure
- * that they are not removed by overzealous /tmp-cleaning tasks.
+ * Touch the socket and lock file at least every ten minutes, to
+ * ensure that they are not removed by overzealous /tmp-cleaning
+ * tasks.
*/
now = time(NULL);
if (now - last_touch_time >= 10 * 60)
int backendPID;
long cancelAuthCode;
Backend *bp;
+
#ifndef EXEC_BACKEND
Dlelem *curr;
+
#else
int i;
#endif
switch (postgres_signal_arg)
{
case SIGTERM:
+
/*
* Smart Shutdown:
*
break;
case SIGINT:
+
/*
* Fast Shutdown:
*
/*
* No children left. Begin shutdown of data base system.
*
- * Note: if we previously got SIGTERM then we may send SIGUSR2
- * to the bgwriter a second time here. This should be harmless.
+ * Note: if we previously got SIGTERM then we may send SIGUSR2 to
+ * the bgwriter a second time here. This should be harmless.
*/
if (StartupPID != 0 || FatalError)
break; /* let reaper() handle this */
break;
case SIGQUIT:
+
/*
* Immediate Shutdown:
*
while ((pid = win32_waitpid(&exitstatus)) > 0)
{
/*
- * We need to do this here, and not in CleanupBackend, since this is
- * to be called on all children when we are done with them. Could
- * move to LogChildExit, but that seems like asking for future
- * trouble...
+ * We need to do this here, and not in CleanupBackend, since this
+ * is to be called on all children when we are done with them.
+ * Could move to LogChildExit, but that seems like asking for
+ * future trouble...
*/
win32_RemoveChild(pid);
-#endif /* WIN32 */
-#endif /* HAVE_WAITPID */
+#endif /* WIN32 */
+#endif /* HAVE_WAITPID */
/*
* Check if this child was a startup process.
}
/*
- * Startup succeeded - we are done with system startup or recovery.
+ * Startup succeeded - we are done with system startup or
+ * recovery.
*/
FatalError = false;
/*
- * Crank up the background writer. It doesn't matter if this
+ * Crank up the background writer. It doesn't matter if this
* fails, we'll just try again later.
*/
Assert(BgWriterPID == 0);
/*
* Go to shutdown mode if a shutdown request was pending.
- * Otherwise, try to start the archiver and stats collector too.
+ * Otherwise, try to start the archiver and stats collector
+ * too.
*/
if (Shutdown > NoShutdown && BgWriterPID != 0)
kill(BgWriterPID, SIGUSR2);
- else if (Shutdown == NoShutdown) {
- if (XLogArchivingActive() && PgArchPID == 0)
- PgArchPID = pgarch_start();
- if (PgStatPID == 0)
- PgStatPID = pgstat_start();
- }
+ else if (Shutdown == NoShutdown)
+ {
+ if (XLogArchivingActive() && PgArchPID == 0)
+ PgArchPID = pgarch_start();
+ if (PgStatPID == 0)
+ PgStatPID = pgstat_start();
+ }
continue;
}
!FatalError && !DLGetHead(BackendList))
{
/*
- * Normal postmaster exit is here: we've seen normal
- * exit of the bgwriter after it's been told to shut down.
- * We expect that it wrote a shutdown checkpoint. (If
- * for some reason it didn't, recovery will occur on next
+ * Normal postmaster exit is here: we've seen normal exit
+ * of the bgwriter after it's been told to shut down. We
+ * expect that it wrote a shutdown checkpoint. (If for
+ * some reason it didn't, recovery will occur on next
* postmaster start.)
*
* Note: we do not wait around for exit of the archiver or
* stats processes. They've been sent SIGQUIT by this
- * point, and in any case contain logic to commit hara-kiri
- * if they notice the postmaster is gone.
+ * point, and in any case contain logic to commit
+ * hara-kiri if they notice the postmaster is gone.
*/
ExitPostmaster(0);
}
+
/*
* Any unexpected exit of the bgwriter is treated as a crash.
*/
}
/*
- * Was it the archiver? If so, just try to start a new
- * one; no need to force reset of the rest of the system. (If fail,
- * we'll try again in future cycles of the main loop.)
+ * Was it the archiver? If so, just try to start a new one; no
+ * need to force reset of the rest of the system. (If fail, we'll
+ * try again in future cycles of the main loop.)
*/
if (PgArchPID != 0 && pid == PgArchPID)
{
}
/*
- * Was it the statistics collector? If so, just try to start a new
- * one; no need to force reset of the rest of the system. (If fail,
- * we'll try again in future cycles of the main loop.)
+ * Was it the statistics collector? If so, just try to start a
+ * new one; no need to force reset of the rest of the system. (If
+ * fail, we'll try again in future cycles of the main loop.)
*/
if (PgStatPID != 0 && pid == PgStatPID)
{
{
/*
* Wait for all important children to exit, then reset shmem and
- * StartupDataBase. (We can ignore the archiver and stats processes
- * here since they are not connected to shmem.)
+ * StartupDataBase. (We can ignore the archiver and stats
+ * processes here since they are not connected to shmem.)
*/
if (DLGetHead(BackendList) || StartupPID != 0 || BgWriterPID != 0)
goto reaper_done;
*/
static void
CleanupBackend(int pid,
- int exitstatus) /* child's exit status. */
+ int exitstatus) /* child's exit status. */
{
Dlelem *curr;
/*
* Make log entry unless there was a previous crash (if so, nonzero
- * exit status is to be expected in SIGQUIT response; don't clutter log)
+ * exit status is to be expected in SIGQUIT response; don't clutter
+ * log)
*/
if (!FatalError)
{
LogChildExit(LOG, procname, pid, exitstatus);
ereport(LOG,
- (errmsg("terminating any other active server processes")));
+ (errmsg("terminating any other active server processes")));
}
/* Process regular backends */
pid = backend_forkexec(port);
-#else /* !EXEC_BACKEND */
+#else /* !EXEC_BACKEND */
#ifdef LINUX_PROFILE
proc_exit(BackendRun(port));
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
if (pid < 0)
{
* Build the PostmasterContext (which didn't exist before, in this
* process) to contain the data.
*
- * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
+ * FIXME: [fork/exec] Ugh. Is there a way around this overhead?
*/
#ifdef EXEC_BACKEND
Assert(PostmasterContext == NULL);
}
#endif
- return pid; /* Parent returns pid, or -1 on fork failure */
+ return pid; /* Parent returns pid, or -1 on fork
+ * failure */
}
/*
if (strcmp(argv[1], "-forkcol") == 0)
{
/*
- * Do NOT close postmaster sockets here, because we are forking from
- * pgstat buffer process, which already did it.
+ * Do NOT close postmaster sockets here, because we are forking
+ * from pgstat buffer process, which already did it.
*/
/* Do not want to attach to shared memory */
return 1; /* shouldn't get here */
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/*
* Send SIGUSR1 to archiver process, to wake it up and begin
* archiving next transaction log file.
*/
- kill(PgArchPID, SIGUSR1);
+ kill(PgArchPID, SIGUSR1);
}
- }
+ }
PG_SETMASK(&UnBlockSig);
int cnt = 0;
for (curr = DLGetHead(BackendList); curr; curr = DLGetSucc(curr))
- {
cnt++;
- }
return cnt;
}
/*
* StartChildProcess -- start a non-backend child process for the postmaster
*
- * xlog determines what kind of child will be started. All child types
+ * xlog determines what kind of child will be started. All child types
* initially go to BootstrapMain, which will handle common setup.
*
* Return value of StartChildProcess is subprocess' PID, or 0 if failed
char *av[10];
int ac = 0;
char xlbuf[32];
+
#ifdef LINUX_PROFILE
struct itimerval prof_itimer;
#endif
pid = postmaster_forkexec(ac, av);
-#else /* !EXEC_BACKEND */
+#else /* !EXEC_BACKEND */
#ifdef LINUX_PROFILE
/* see comments in BackendStartup */
beos_backend_startup();
#endif
- IsUnderPostmaster = true; /* we are a postmaster subprocess now */
+ IsUnderPostmaster = true; /* we are a postmaster subprocess
+ * now */
/* Close the postmaster's sockets */
ClosePostmasterPorts(false);
BootstrapMain(ac, av);
ExitPostmaster(0);
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
if (pid < 0)
{
break;
case BS_XLOG_BGWRITER:
ereport(LOG,
- (errmsg("could not fork background writer process: %m")));
+ (errmsg("could not fork background writer process: %m")));
break;
default:
ereport(LOG,
}
/*
- * fork failure is fatal during startup, but there's no need
- * to choke immediately if starting other child types fails.
+ * fork failure is fatal during startup, but there's no need to
+ * choke immediately if starting other child types fails.
*/
if (xlop == BS_XLOG_STARTUP)
ExitPostmaster(1);
#define write_var(var,fp) fwrite((void*)&(var),sizeof(var),1,fp)
#define read_var(var,fp) fread((void*)&(var),sizeof(var),1,fp)
#define write_array_var(var,fp) fwrite((void*)(var),sizeof(var),1,fp)
-#define read_array_var(var,fp) fread((void*)(var),sizeof(var),1,fp)
+#define read_array_var(var,fp) fread((void*)(var),sizeof(var),1,fp)
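/*
 * Illustrative note (not part of the patch): these macros just dump and
 * reload a variable's raw bytes, e.g.
 *
 *		write_var(port->sock, fp);
 *
 * expands to
 *
 *		fwrite((void*)&(port->sock),sizeof(port->sock),1,fp);
 *
 * and the child recovers the value with the matching read_var() call
 * after fork/exec.
 */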
static bool
write_backend_variables(char *filename, Port *port)
if (!fp)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not read from backend variables file \"%s\": %m",
- filename)));
+ errmsg("could not read from backend variables file \"%s\": %m",
+ filename)));
/* Read vars */
read_var(port->sock, fp);
(errmsg_internal("could not find backend entry with pid %d",
(int) pid)));
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
#ifdef WIN32
HANDLE waiterThread;
/* Format the cmd line */
- cmdLine[sizeof(cmdLine)-1] = '\0';
- cmdLine[sizeof(cmdLine)-2] = '\0';
- snprintf(cmdLine, sizeof(cmdLine)-1, "\"%s\"", path);
+ cmdLine[sizeof(cmdLine) - 1] = '\0';
+ cmdLine[sizeof(cmdLine) - 2] = '\0';
+ snprintf(cmdLine, sizeof(cmdLine) - 1, "\"%s\"", path);
i = 0;
while (argv[++i] != NULL)
{
j = strlen(cmdLine);
- snprintf(cmdLine+j, sizeof(cmdLine)-1-j, " \"%s\"", argv[i]);
+ snprintf(cmdLine + j, sizeof(cmdLine) - 1 - j, " \"%s\"", argv[i]);
}
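/*
 * Illustrative note (not part of the patch): the two '\0' bytes planted at
 * the end of cmdLine before the snprintf() calls serve as a truncation
 * sentinel.  While the formatted command line still fits, no real
 * character is ever written at index sizeof(cmdLine) - 2, so if the test
 * below finds that byte overwritten, the arguments must have filled the
 * buffer to its limit and the command line is rejected as too long.
 */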
- if (cmdLine[sizeof(cmdLine)-2] != '\0')
+ if (cmdLine[sizeof(cmdLine) - 2] != '\0')
{
elog(LOG, "subprocess command line too long");
return -1;
(LPVOID) childHandleCopy, 0, NULL);
if (!waiterThread)
ereport(FATAL,
- (errmsg_internal("could not create sigchld waiter thread: %d",
- (int) GetLastError())));
+ (errmsg_internal("could not create sigchld waiter thread: %d",
+ (int) GetLastError())));
CloseHandle(waiterThread);
if (IsUnderPostmaster)
static pid_t
win32_waitpid(int *exitstatus)
{
- /*
- * Note: Do NOT use WaitForMultipleObjectsEx, as we don't want to
- * run queued APCs here.
- */
- int index;
- DWORD exitCode;
- DWORD ret;
+ /*
+ * Note: Do NOT use WaitForMultipleObjectsEx, as we don't want to run
+ * queued APCs here.
+ */
+ int index;
+ DWORD exitCode;
+ DWORD ret;
unsigned long offset;
Assert(win32_childPIDArray && win32_childHNDArray);
for (offset = 0; offset < win32_numChildren; offset += MAXIMUM_WAIT_OBJECTS)
{
unsigned long num = min(MAXIMUM_WAIT_OBJECTS, win32_numChildren - offset);
+
ret = WaitForMultipleObjects(num, &win32_childHNDArray[offset], FALSE, 0);
switch (ret)
{
case WAIT_FAILED:
ereport(LOG,
(errmsg_internal("failed to wait on %lu of %lu children: %d",
- num, win32_numChildren, (int) GetLastError())));
+ num, win32_numChildren, (int) GetLastError())));
return -1;
case WAIT_TIMEOUT:
break;
default:
+
/*
* Get the exit code, and return the PID of, the
* respective process
pg_queue_signal(SIGCHLD);
else
write_stderr("ERROR: failed to wait on child process handle: %d\n",
- (int) GetLastError());
+ (int) GetLastError());
CloseHandle(procHandle);
return 0;
}
-#endif /* WIN32 */
+#endif /* WIN32 */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.6 2004/08/29 00:38:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/postmaster/syslogger.c,v 1.7 2004/08/29 05:06:46 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * GUC parameters. Redirect_stderr cannot be changed after postmaster
+ * GUC parameters. Redirect_stderr cannot be changed after postmaster
* start, but the rest can change at SIGHUP.
*/
bool Redirect_stderr = false;
-int Log_RotationAge = 24*60;
-int Log_RotationSize = 10*1024;
-char * Log_directory = "pg_log";
-char * Log_filename_prefix = "postgresql-";
+int Log_RotationAge = 24 * 60;
+int Log_RotationSize = 10 * 1024;
+char *Log_directory = "pg_log";
+char *Log_filename_prefix = "postgresql-";
/*
* Globally visible state (used by elog.c)
*/
-bool am_syslogger = false;
+bool am_syslogger = false;
/*
* Private state
*/
-static pg_time_t last_rotation_time = 0;
+static pg_time_t last_rotation_time = 0;
-static bool redirection_done = false;
+static bool redirection_done = false;
-static bool pipe_eof_seen = false;
+static bool pipe_eof_seen = false;
static FILE *syslogFile = NULL;
/* These must be exported for EXEC_BACKEND case ... annoying */
#ifndef WIN32
-int syslogPipe[2] = {-1, -1};
+int syslogPipe[2] = {-1, -1};
+
#else
-HANDLE syslogPipe[2] = {0, 0};
+HANDLE syslogPipe[2] = {0, 0};
#endif
#ifdef WIN32
-static HANDLE threadHandle=0;
+static HANDLE threadHandle = 0;
static CRITICAL_SECTION sysfileSection;
#endif
static void syslogger_parseArgs(int argc, char *argv[]);
#endif
static void write_syslogger_file_binary(const char *buffer, int count);
+
#ifdef WIN32
static unsigned int __stdcall pipeThread(void *arg);
#endif
static void logfile_rotate(void);
-static char* logfile_getname(pg_time_t timestamp);
+static char *logfile_getname(pg_time_t timestamp);
static void sigHupHandler(SIGNAL_ARGS);
NON_EXEC_STATIC void
SysLoggerMain(int argc, char *argv[])
{
- char currentLogDir[MAXPGPATH];
+ char currentLogDir[MAXPGPATH];
IsUnderPostmaster = true; /* we are a postmaster subprocess now */
#ifdef EXEC_BACKEND
syslogger_parseArgs(argc, argv);
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
am_syslogger = true;
/*
* If we restarted, our stderr is already redirected into our own
- * input pipe. This is of course pretty useless, not to mention that
+ * input pipe. This is of course pretty useless, not to mention that
* it interferes with detecting pipe EOF. Point stderr to /dev/null.
- * This assumes that all interesting messages generated in the syslogger
- * will come through elog.c and will be sent to write_syslogger_file.
+ * This assumes that all interesting messages generated in the
+ * syslogger will come through elog.c and will be sent to
+ * write_syslogger_file.
*/
if (redirection_done)
{
- int fd = open(NULL_DEV, O_WRONLY);
+ int fd = open(NULL_DEV, O_WRONLY);
/*
- * The closes might look redundant, but they are not: we want to be
- * darn sure the pipe gets closed even if the open failed. We can
- * survive running with stderr pointing nowhere, but we can't afford
- * to have extra pipe input descriptors hanging around.
+ * The closes might look redundant, but they are not: we want to
+ * be darn sure the pipe gets closed even if the open failed. We
+ * can survive running with stderr pointing nowhere, but we can't
+ * afford to have extra pipe input descriptors hanging around.
*/
close(fileno(stdout));
close(fileno(stderr));
/*
* Also close our copy of the write end of the pipe. This is needed
- * to ensure we can detect pipe EOF correctly. (But note that in the
+ * to ensure we can detect pipe EOF correctly. (But note that in the
* restart case, the postmaster already did this.)
*/
#ifndef WIN32
*/
pqsignal(SIGHUP, sigHupHandler); /* set flag to read config file */
- pqsignal(SIGINT, SIG_IGN);
+ pqsignal(SIGINT, SIG_IGN);
pqsignal(SIGTERM, SIG_IGN);
pqsignal(SIGQUIT, SIG_IGN);
pqsignal(SIGALRM, SIG_IGN);
{
unsigned int tid;
- threadHandle = (HANDLE)_beginthreadex(0, 0, pipeThread, 0, 0, &tid);
+ threadHandle = (HANDLE) _beginthreadex(0, 0, pipeThread, 0, 0, &tid);
}
-#endif /* WIN32 */
+#endif /* WIN32 */
/* remember age of initial logfile */
last_rotation_time = time(NULL);
/* main worker loop */
for (;;)
{
- bool rotation_requested = false;
+ bool rotation_requested = false;
+
#ifndef WIN32
- char logbuffer[1024];
- int bytesRead;
- int rc;
+ char logbuffer[1024];
+ int bytesRead;
+ int rc;
fd_set rfds;
struct timeval timeout;
#endif
ProcessConfigFile(PGC_SIGHUP);
/*
- * Check if the log directory changed in postgresql.conf. If so,
- * force rotation to make sure we're writing the logfiles in the
- * right place.
+ * Check if the log directory changed in postgresql.conf. If
+ * so, force rotation to make sure we're writing the logfiles
+ * in the right place.
*
* XXX is it worth responding similarly to a change of
* Log_filename_prefix?
Log_RotationAge > 0)
{
/*
- * Do a logfile rotation if too much time has elapsed
- * since the last one.
+ * Do a logfile rotation if too much time has elapsed since
+ * the last one.
*/
- pg_time_t now = time(NULL);
- int elapsed_secs = now - last_rotation_time;
+ pg_time_t now = time(NULL);
+ int elapsed_secs = now - last_rotation_time;
if (elapsed_secs >= Log_RotationAge * 60)
rotation_requested = true;
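/*
 * Illustrative note (not part of the patch): Log_RotationAge is kept in
 * minutes, hence the "* 60" above.  With the default of 24 * 60 set
 * earlier in this file, elapsed_secs must reach 86400 (one day) before a
 * time-based rotation is requested.
 */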
logfile_rotate();
#ifndef WIN32
+
/*
* Wait for some data, timing out after 1 second
*/
FD_ZERO(&rfds);
FD_SET(syslogPipe[0], &rfds);
- timeout.tv_sec=1;
- timeout.tv_usec=0;
+ timeout.tv_sec = 1;
+ timeout.tv_usec = 0;
- rc = select(syslogPipe[0]+1, &rfds, NULL, NULL, &timeout);
+ rc = select(syslogPipe[0] + 1, &rfds, NULL, NULL, &timeout);
if (rc < 0)
{
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("select() failed in logger process: %m")));
+ errmsg("select() failed in logger process: %m")));
}
else if (rc > 0 && FD_ISSET(syslogPipe[0], &rfds))
{
- bytesRead = piperead(syslogPipe[0],
+ bytesRead = piperead(syslogPipe[0],
logbuffer, sizeof(logbuffer));
if (bytesRead < 0)
if (errno != EINTR)
ereport(LOG,
(errcode_for_socket_access(),
- errmsg("could not read from logger pipe: %m")));
+ errmsg("could not read from logger pipe: %m")));
}
else if (bytesRead > 0)
{
{
/*
* Zero bytes read when select() is saying read-ready
- * means EOF on the pipe: that is, there are no longer
- * any processes with the pipe write end open. Therefore,
- * the postmaster and all backends are shut down, and we
- * are done.
+ * means EOF on the pipe: that is, there are no longer any
+ * processes with the pipe write end open. Therefore, the
+ * postmaster and all backends are shut down, and we are
+ * done.
*/
pipe_eof_seen = true;
}
}
-#else /* WIN32 */
+#else /* WIN32 */
+
/*
- * On Windows we leave it to a separate thread to transfer data and
- * detect pipe EOF. The main thread just wakes up once a second to
- * check for SIGHUP and rotation conditions.
+ * On Windows we leave it to a separate thread to transfer data
+ * and detect pipe EOF. The main thread just wakes up once a
+ * second to check for SIGHUP and rotation conditions.
*/
pgwin32_backend_usleep(1000000);
-#endif /* WIN32 */
+#endif /* WIN32 */
if (pipe_eof_seen)
{
ereport(LOG,
(errmsg("logger shutting down")));
+
/*
- * Normal exit from the syslogger is here. Note that we
- * deliberately do not close syslogFile before exiting;
- * this is to allow for the possibility of elog messages
- * being generated inside proc_exit. Regular exit() will
- * take care of flushing and closing stdio channels.
+ * Normal exit from the syslogger is here. Note that we
+ * deliberately do not close syslogFile before exiting; this
+ * is to allow for the possibility of elog messages being
+ * generated inside proc_exit. Regular exit() will take care
+ * of flushing and closing stdio channels.
*/
proc_exit(0);
}
int
SysLogger_Start(void)
{
- pid_t sysloggerPid;
- pg_time_t now;
- char *filename;
+ pid_t sysloggerPid;
+ pg_time_t now;
+ char *filename;
if (!Redirect_stderr)
- return 0;
+ return 0;
/*
- * If first time through, create the pipe which will receive stderr output.
+ * If first time through, create the pipe which will receive stderr
+ * output.
*
* If the syslogger crashes and needs to be restarted, we continue to use
- * the same pipe (indeed must do so, since extant backends will be writing
- * into that pipe).
+ * the same pipe (indeed must do so, since extant backends will be
+ * writing into that pipe).
*
* This means the postmaster must continue to hold the read end of the
* pipe open, so we can pass it down to the reincarnated syslogger.
* This is a bit klugy but we have little choice.
*/
#ifndef WIN32
- if (syslogPipe[0] < 0)
+ if (syslogPipe[0] < 0)
{
- if (pgpipe(syslogPipe) < 0)
- ereport(FATAL,
+ if (pgpipe(syslogPipe) < 0)
+ ereport(FATAL,
(errcode_for_socket_access(),
- (errmsg("could not create pipe for syslogging: %m"))));
+ (errmsg("could not create pipe for syslogging: %m"))));
}
#else
- if (!syslogPipe[0])
+ if (!syslogPipe[0])
{
SECURITY_ATTRIBUTES sa;
sa.bInheritHandle = TRUE;
if (!CreatePipe(&syslogPipe[0], &syslogPipe[1], &sa, 32768))
- ereport(FATAL,
+ ereport(FATAL,
(errcode_for_file_access(),
- (errmsg("could not create pipe for syslogging: %m"))));
+ (errmsg("could not create pipe for syslogging: %m"))));
}
#endif
}
/*
- * The initial logfile is created right in the postmaster,
- * to verify that the Log_directory is writable.
+ * The initial logfile is created right in the postmaster, to verify
+ * that the Log_directory is writable.
*/
now = time(NULL);
filename = logfile_getname(now);
syslogFile = fopen(filename, "a");
if (!syslogFile)
- ereport(FATAL,
+ ereport(FATAL,
(errcode_for_file_access(),
(errmsg("could not create logfile \"%s\": %m",
filename))));
close(syslogPipe[1]);
syslogPipe[1] = -1;
#else
- int fd;
+ int fd;
fflush(stderr);
fd = _open_osfhandle((long) syslogPipe[1],
static pid_t
syslogger_forkexec(void)
{
- char *av[10];
- int ac = 0, bufc = 0, i;
- char numbuf[2][32];
+ char *av[10];
+ int ac = 0,
+ bufc = 0,
+ i;
+ char numbuf[2][32];
av[ac++] = "postgres";
av[ac++] = "-forklog";
/* static variables (those not passed by write_backend_variables) */
#ifndef WIN32
if (syslogFile != NULL)
- snprintf(numbuf[bufc++], 32, "%d", fileno(syslogFile));
- else
- strcpy(numbuf[bufc++], "-1");
+ snprintf(numbuf[bufc++], 32, "%d", fileno(syslogFile));
+ else
+ strcpy(numbuf[bufc++], "-1");
snprintf(numbuf[bufc++], 32, "%d", (int) redirection_done);
-#else /* WIN32 */
+#else /* WIN32 */
if (syslogFile != NULL)
- snprintf(numbuf[bufc++], 32, "%ld",
+ snprintf(numbuf[bufc++], 32, "%ld",
_get_osfhandle(_fileno(syslogFile)));
- else
- strcpy(numbuf[bufc++], "0");
+ else
+ strcpy(numbuf[bufc++], "0");
snprintf(numbuf[bufc++], 32, "%d", (int) redirection_done);
-#endif /* WIN32 */
+#endif /* WIN32 */
/* Add to the arg list */
Assert(bufc <= lengthof(numbuf));
static void
syslogger_parseArgs(int argc, char *argv[])
{
- int fd;
+ int fd;
Assert(argc == 5);
argv += 3;
fd = atoi(*argv++);
if (fd != -1)
{
- syslogFile = fdopen(fd, "a");
+ syslogFile = fdopen(fd, "a");
setvbuf(syslogFile, NULL, LBF_MODE, 0);
}
redirection_done = (bool) atoi(*argv++);
-#else /* WIN32 */
+#else /* WIN32 */
fd = atoi(*argv++);
if (fd != 0)
{
}
}
redirection_done = (bool) atoi(*argv++);
-#endif /* WIN32 */
+#endif /* WIN32 */
}
-
-#endif /* EXEC_BACKEND */
+#endif /* EXEC_BACKEND */
/* --------------------------------
write_syslogger_file(const char *buffer, int count)
{
#ifdef WIN32
+
/*
* On Windows we need to do our own newline-to-CRLF translation.
*/
- char convbuf[256];
- char *p;
- int n;
+ char convbuf[256];
+ char *p;
+ int n;
p = convbuf;
n = 0;
while (count-- > 0)
{
- if (*buffer == '\n')
- {
- *p++ = '\r';
+ if (*buffer == '\n')
+ {
+ *p++ = '\r';
n++;
- }
- *p++ = *buffer++;
+ }
+ *p++ = *buffer++;
n++;
if (n >= sizeof(convbuf) - 1)
{
p = convbuf;
n = 0;
}
- }
+ }
if (n > 0)
write_syslogger_file_binary(convbuf, n);
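/*
 * Illustrative note (not part of the patch): the flush threshold of
 * sizeof(convbuf) - 1 used above leaves room for the worst case in which
 * a single input '\n' expands to the two bytes '\r' '\n', so the
 * translated output can never overrun convbuf.
 */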
-#else /* !WIN32 */
+#else /* !WIN32 */
write_syslogger_file_binary(buffer, count);
#endif
}
static void
write_syslogger_file_binary(const char *buffer, int count)
{
- int rc;
+ int rc;
#ifndef WIN32
- rc = fwrite(buffer, 1, count, syslogFile);
+ rc = fwrite(buffer, 1, count, syslogFile);
#else
- EnterCriticalSection(&sysfileSection);
- rc = fwrite(buffer, 1, count, syslogFile);
- LeaveCriticalSection(&sysfileSection);
+ EnterCriticalSection(&sysfileSection);
+ rc = fwrite(buffer, 1, count, syslogFile);
+ LeaveCriticalSection(&sysfileSection);
#endif
- if (rc != count)
- ereport(LOG,
+ if (rc != count)
+ ereport(LOG,
(errcode_for_file_access(),
errmsg("could not write to logfile: %m")));
}
static unsigned int __stdcall
pipeThread(void *arg)
{
- DWORD bytesRead;
- char logbuffer[1024];
+ DWORD bytesRead;
+ char logbuffer[1024];
- for (;;)
- {
- if (!ReadFile(syslogPipe[0], logbuffer, sizeof(logbuffer),
+ for (;;)
+ {
+ if (!ReadFile(syslogPipe[0], logbuffer, sizeof(logbuffer),
&bytesRead, 0))
{
- DWORD error = GetLastError();
+ DWORD error = GetLastError();
if (error == ERROR_HANDLE_EOF ||
error == ERROR_BROKEN_PIPE)
(errcode_for_file_access(),
errmsg("could not read from logger pipe: %m")));
}
- else if (bytesRead > 0)
- write_syslogger_file_binary(logbuffer, bytesRead);
- }
+ else if (bytesRead > 0)
+ write_syslogger_file_binary(logbuffer, bytesRead);
+ }
/* We exit the above loop only upon detecting pipe EOF */
pipe_eof_seen = true;
- _endthread();
- return 0;
+ _endthread();
+ return 0;
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/*
* perform logfile rotation
static void
logfile_rotate(void)
{
- char *filename;
- pg_time_t now;
- FILE *fh;
+ char *filename;
+ pg_time_t now;
+ FILE *fh;
now = time(NULL);
filename = logfile_getname(now);
fh = fopen(filename, "a");
if (!fh)
{
- int saveerrno = errno;
+ int saveerrno = errno;
ereport(LOG,
(errcode_for_file_access(),
filename)));
/*
- * ENFILE/EMFILE are not too surprising on a busy system; just keep
- * using the old file till we manage to get a new one. Otherwise,
- * assume something's wrong with Log_directory and stop trying to
- * create files.
+ * ENFILE/EMFILE are not too surprising on a busy system; just
+ * keep using the old file till we manage to get a new one.
+ * Otherwise, assume something's wrong with Log_directory and stop
+ * trying to create files.
*/
if (saveerrno != ENFILE && saveerrno != EMFILE)
{
ereport(LOG,
- (errmsg("disabling auto rotation (use SIGHUP to reenable)")));
+ (errmsg("disabling auto rotation (use SIGHUP to reenable)")));
Log_RotationAge = 0;
Log_RotationSize = 0;
}
*
* Result is palloc'd.
*/
-static char*
+static char *
logfile_getname(pg_time_t timestamp)
{
- char *filename;
- char stamptext[128];
+ char *filename;
+ char stamptext[128];
pg_strftime(stamptext, sizeof(stamptext), "%Y-%m-%d_%H%M%S",
pg_localtime(×tamp));
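/*
 * Illustrative sketch, not part of this patch: logfile_getname() above
 * formats a timestamp into the log file name.  The same effect with the
 * portable C library; the helper name and file-name pattern are hypothetical.
 */
#include <stdio.h>
#include <time.h>

static void
make_logfile_name(char *buf, size_t buflen, const char *logdir, time_t now)
{
	char		stamptext[128];

	strftime(stamptext, sizeof(stamptext), "%Y-%m-%d_%H%M%S",
			 localtime(&now));
	snprintf(buf, buflen, "%s/postgresql-%s.log", logdir, stamptext);
}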
static void
sigHupHandler(SIGNAL_ARGS)
{
- got_SIGHUP = true;
+ got_SIGHUP = true;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.97 2004/08/29 04:12:46 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteDefine.c,v 1.98 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
if (event_type == CMD_SELECT)
{
- ListCell *tllist;
+ ListCell *tllist;
int i;
/*
if (!is_instead || query->commandType != CMD_SELECT)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("rules on SELECT must have action INSTEAD SELECT")));
+ errmsg("rules on SELECT must have action INSTEAD SELECT")));
/*
* ... there can be no rule qual, ...
if (heap_getnext(scanDesc, ForwardScanDirection) != NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it is not empty",
- event_obj->relname)));
+ errmsg("could not convert table \"%s\" to a view because it is not empty",
+ event_obj->relname)));
heap_endscan(scanDesc);
if (event_relation->rd_rel->reltriggers != 0)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has triggers",
- event_obj->relname),
- errhint("In particular, the table may not be involved in any foreign key relationships.")));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has triggers",
+ event_obj->relname),
+ errhint("In particular, the table may not be involved in any foreign key relationships.")));
if (event_relation->rd_rel->relhasindex)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has indexes",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has indexes",
+ event_obj->relname)));
if (event_relation->rd_rel->relhassubclass)
ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("could not convert table \"%s\" to a view because it has child tables",
- event_obj->relname)));
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("could not convert table \"%s\" to a view because it has child tables",
+ event_obj->relname)));
RelisBecomingView = true;
}
* We want the rule's table references to be checked as though by the
* rule owner, not the user referencing the rule. Therefore, scan
* through the rule's rtables and set the checkAsUser field on all
- * rtable entries. We have to look at event_qual as well, in case
- * it contains sublinks.
+ * rtable entries. We have to look at event_qual as well, in case it
+ * contains sublinks.
*/
foreach(l, action)
{
* Note: for a view (ON SELECT rule), the checkAsUser field of the *OLD*
* RTE entry will be overridden when the view rule is expanded, and the
* checkAsUser field of the *NEW* entry is irrelevant because that entry's
- * requiredPerms bits will always be zero. However, for other types of rules
+ * requiredPerms bits will always be zero. However, for other types of rules
* it's important to set these fields to match the rule owner. So we just set
* them always.
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.143 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteHandler.c,v 1.144 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static List *adjustJoinTreeList(Query *parsetree, bool removert, int rt_index);
static void rewriteTargetList(Query *parsetree, Relation target_relation);
static TargetEntry *process_matched_tle(TargetEntry *src_tle,
- TargetEntry *prior_tle,
- const char *attrName);
+ TargetEntry *prior_tle,
+ const char *attrName);
static Node *get_assignment_input(Node *node);
static void markQueryForUpdate(Query *qry, bool skipOldNew);
static List *matchLocks(CmdType event, RuleLock *rulelocks,
* action. Some of the entries may be unused after we finish
* rewriting, but we leave them all in place for two reasons:
*
- * * We'd have a much harder job to adjust the query's varnos
- * if we selectively removed RT entries.
+ * We'd have a much harder job to adjust the query's varnos if we
+ * selectively removed RT entries.
*
- * * If the rule is INSTEAD, then the original query won't be
- * executed at all, and so its rtable must be preserved so that
- * the executor will do the correct permissions checks on it.
+ * If the rule is INSTEAD, then the original query won't be executed at
+ * all, and so its rtable must be preserved so that the executor will
+ * do the correct permissions checks on it.
*
* RT entries that are not referenced in the completed jointree will be
* ignored by the planner, so they do not affect query semantics. But
* any permissions checks specified in them will be applied during
- * executor startup (see ExecCheckRTEPerms()). This allows us to check
- * that the caller has, say, insert-permission on a view, when the view
- * is not semantically referenced at all in the resulting query.
+ * executor startup (see ExecCheckRTEPerms()). This allows us to
+ * check that the caller has, say, insert-permission on a view, when
+ * the view is not semantically referenced at all in the resulting
+ * query.
*
* When a rule is not INSTEAD, the permissions checks done on its copied
- * RT entries will be redundant with those done during execution of the
- * original query, but we don't bother to treat that case differently.
+ * RT entries will be redundant with those done during execution of
+ * the original query, but we don't bother to treat that case
+ * differently.
*
* NOTE: because planner will destructively alter rtable, we must ensure
* that rule action's rtable is separate and shares no substructure
* with the main rtable. Hence do a deep copy here.
*/
sub_action->rtable = list_concat((List *) copyObject(parsetree->rtable),
- sub_action->rtable);
+ sub_action->rtable);
/*
* Each rule action's jointree should be the main parsetree's jointree
rtr->rtindex == rt_index)
{
newjointree = list_delete_ptr(newjointree, rtr);
- /* foreach is safe because we exit loop after list_delete... */
+
+ /*
+ * foreach is safe because we exit loop after
+ * list_delete...
+ */
break;
}
}
* assignments appear to occur left-to-right.
*
* For FieldStore, instead of nesting we can generate a single
- * FieldStore with multiple target fields. We must nest when
+ * FieldStore with multiple target fields. We must nest when
* ArrayRefs are involved though.
*----------
*/
priorbottom = prior_input;
for (;;)
{
- Node *newbottom = get_assignment_input(priorbottom);
+ Node *newbottom = get_assignment_input(priorbottom);
if (newbottom == NULL)
break; /* found the original Var reference */
*/
if (IsA(src_expr, FieldStore))
{
- FieldStore *fstore = makeNode(FieldStore);
+ FieldStore *fstore = makeNode(FieldStore);
if (IsA(prior_expr, FieldStore))
{
memcpy(fstore, prior_expr, sizeof(FieldStore));
fstore->newvals =
list_concat(list_copy(((FieldStore *) prior_expr)->newvals),
- list_copy(((FieldStore *) src_expr)->newvals));
+ list_copy(((FieldStore *) src_expr)->newvals));
fstore->fieldnums =
list_concat(list_copy(((FieldStore *) prior_expr)->fieldnums),
- list_copy(((FieldStore *) src_expr)->fieldnums));
+ list_copy(((FieldStore *) src_expr)->fieldnums));
}
else
{
*/
if (product_queries != NIL)
{
- ListCell *n;
- rewrite_event *rev;
+ ListCell *n;
+ rewrite_event *rev;
foreach(n, rewrite_events)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.87 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/rewrite/rewriteManip.c,v 1.88 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* The caller must also provide target_rtable, the rangetable containing
* the target relation (which must be described by the target_varno'th
* RTE in that list). This is needed to handle whole-row Vars referencing
- * the target. We expand such Vars into RowExpr constructs.
+ * the target. We expand such Vars into RowExpr constructs.
*
* Note: the business with inserted_sublink is needed to update hasSubLinks
* in subqueries when the replacement adds a subquery inside a subquery.
if (var->varattno == InvalidAttrNumber)
{
/* Must expand whole-tuple reference into RowExpr */
- RowExpr *rowexpr;
- List *fields;
+ RowExpr *rowexpr;
+ List *fields;
/*
* If generating an expansion for a var of a named rowtype
- * (ie, this is a plain relation RTE), then we must include
- * dummy items for dropped columns. If the var is RECORD
- * (ie, this is a JOIN), then omit dropped columns.
+ * (ie, this is a plain relation RTE), then we must
+ * include dummy items for dropped columns. If the var is
+ * RECORD (ie, this is a JOIN), then omit dropped columns.
*/
expandRTE(context->target_rtable, this_varno, this_varlevelsup,
(var->vartype != RECORDOID),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.67 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_init.c,v 1.68 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* IO_IN_PROGRESS -- this is a flag in the buffer descriptor.
* It must be set when an IO is initiated and cleared at
- * the end of the IO. It is there to make sure that one
+ * the end of the IO. It is there to make sure that one
* process doesn't start to use a buffer while another is
* faulting it in. see IOWait/IOSignal.
*
char *block;
/*
- * It's probably not really necessary to grab the lock --- if there's
- * anyone else attached to the shmem at this point, we've got
- * problems.
+ * It's probably not really necessary to grab the lock --- if
+ * there's anyone else attached to the shmem at this point, we've
+ * got problems.
*/
LWLockAcquire(BufMgrLock, LW_EXCLUSIVE);
block = BufferBlocks;
/*
- * link the buffers into a single linked list. This will become the
- * LIFO list of unused buffers returned by StrategyGetBuffer().
+ * link the buffers into a single linked list. This will become
+ * the LIFO list of unused buffers returned by
+ * StrategyGetBuffer().
*/
for (i = 0; i < NBuffers; block += BLCKSZ, buf++, i++)
{
* routines for finding buffers in the buffer pool.
*
* NOTE: these days, what this table actually provides is a mapping from
- * BufferTags to CDB indexes, not directly to buffers. The function names
+ * BufferTags to CDB indexes, not directly to buffers. The function names
* are thus slight misnomers.
*
* Note: all routines in this file assume that the BufMgrLock is held
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.36 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/buf_table.c,v 1.37 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.174 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.175 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool zero_damaged_pages = false;
#ifdef NOT_USED
-bool ShowPinTrace = false;
+bool ShowPinTrace = false;
#endif
long NDirectFileRead; /* some I/O's are direct file access.
{
ReadBufferCount++;
pgstat_count_buffer_read(&reln->pgstat_info, reln);
+
/*
* lookup the buffer. IO_IN_PROGRESS is set if the requested
* block is not currently in memory.
* it, if it's a shared buffer.
*
* Note: if smgrextend fails, we will end up with a buffer that is
- * allocated but not marked BM_VALID. P_NEW will still select the same
- * block number (because the relation didn't get any longer on disk)
- * and so future attempts to extend the relation will find the same
- * buffer (if it's not been recycled) but come right back here to try
- * smgrextend again.
+ * allocated but not marked BM_VALID. P_NEW will still select the
+ * same block number (because the relation didn't get any longer on
+ * disk) and so future attempts to extend the relation will find the
+ * same buffer (if it's not been recycled) but come right back here to
+ * try smgrextend again.
*/
Assert(!(bufHdr->flags & BM_VALID));
if (!PageHeaderIsValid((PageHeader) MAKE_PTR(bufHdr->data)))
{
/*
- * During WAL recovery, the first access to any data page should
- * overwrite the whole page from the WAL; so a clobbered page
- * header is not reason to fail. Hence, when InRecovery we may
- * always act as though zero_damaged_pages is ON.
+ * During WAL recovery, the first access to any data page
+ * should overwrite the whole page from the WAL; so a
+ * clobbered page header is not reason to fail. Hence, when
+ * InRecovery we may always act as though zero_damaged_pages
+ * is ON.
*/
if (zero_damaged_pages || InRecovery)
{
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("invalid page header in block %u of relation \"%s\"",
- blockNum, RelationGetRelationName(reln))));
+ errmsg("invalid page header in block %u of relation \"%s\"",
+ blockNum, RelationGetRelationName(reln))));
}
}
* if someone were writing it.
*
* Note: it's okay to grab the io_in_progress lock while holding
- * BufMgrLock. All code paths that acquire this lock pin the
- * buffer first; since no one had it pinned (it just came off the
- * free list), no one else can have the lock.
+ * BufMgrLock. All code paths that acquire this lock pin the
+ * buffer first; since no one had it pinned (it just came off
+ * the free list), no one else can have the lock.
*/
StartBufferIO(buf, false);
/*
* Somebody could have allocated another buffer for the same
- * block we are about to read in. While we flush out the
- * dirty buffer, we don't hold the lock and someone could have
+ * block we are about to read in. While we flush out the dirty
+ * buffer, we don't hold the lock and someone could have
* allocated another buffer for the same block. The problem is
* we haven't yet inserted the new tag into the buffer table.
* So we need to check here. -ay 3/95
*
- * Another reason we have to do this is to update cdb_found_index,
- * since the CDB could have disappeared from B1/B2 list while
- * we were writing.
+ * Another reason we have to do this is to update
+ * cdb_found_index, since the CDB could have disappeared from
+ * B1/B2 list while we were writing.
*/
buf2 = StrategyBufferLookup(&newTag, true, &cdb_found_index);
if (buf2 != NULL)
{
/*
- * Found it. Someone has already done what we were about to
- * do. We'll just handle this as if it were found in the
- * buffer pool in the first place. First, give up the
+ * Found it. Someone has already done what we were about
+ * to do. We'll just handle this as if it were found in
+ * the buffer pool in the first place. First, give up the
* buffer we were planning to use.
*/
TerminateBufferIO(buf, 0);
if (!(buf->flags & BM_VALID))
{
/*
- * If we get here, previous attempts to read the buffer
- * must have failed ... but we shall bravely try again.
+ * If we get here, previous attempts to read the
+ * buffer must have failed ... but we shall
+ * bravely try again.
*/
*foundPtr = FALSE;
StartBufferIO(buf, true);
/*
* Tell the buffer replacement strategy that we are replacing the
- * buffer content. Then rename the buffer. Clearing BM_VALID here
- * is necessary, clearing the dirtybits is just paranoia.
+ * buffer content. Then rename the buffer. Clearing BM_VALID here is
+ * necessary, clearing the dirtybits is just paranoia.
*/
StrategyReplaceBuffer(buf, &newTag, cdb_found_index, cdb_replace_index);
buf->tag = newTag;
NBuffers);
/*
- * If called by the background writer, we are usually asked to
- * only write out some portion of dirty buffers now, to prevent
- * the IO storm at checkpoint time.
+ * If called by the background writer, we are usually asked to only
+ * write out some portion of dirty buffers now, to prevent the IO
+ * storm at checkpoint time.
*/
if (percent > 0)
{
/*
* Loop over buffers to be written. Note the BufMgrLock is held at
- * loop top, but is released and reacquired within FlushBuffer,
- * so we aren't holding it long.
+ * loop top, but is released and reacquired within FlushBuffer, so we
+ * aren't holding it long.
*/
for (i = 0; i < num_buffer_dirty; i++)
{
/*
* Check it is still the same page and still needs writing.
*
- * We can check bufHdr->cntxDirty here *without* holding any lock
- * on buffer context as long as we set this flag in access methods
+ * We can check bufHdr->cntxDirty here *without* holding any lock on
+ * buffer context as long as we set this flag in access methods
* *before* logging changes with XLogInsert(): if someone will set
* cntxDirty just after our check we don't worry because of our
* checkpoint.redo points before log record for upcoming changes
if (isCommit)
elog(WARNING,
"buffer refcount leak: [%03d] "
- "(rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
+ "(rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
i,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
buf->tag.rnode.relNode,
XLogFlush(recptr);
/*
- * Now it's safe to write buffer to disk. Note that no one else
- * should have been able to write it while we were busy with
- * locking and log flushing because caller has set the IO flag.
+ * Now it's safe to write buffer to disk. Note that no one else should
+ * have been able to write it while we were busy with locking and log
+ * flushing because caller has set the IO flag.
*
- * It would be better to clear BM_JUST_DIRTIED right here, but we'd
- * have to reacquire the BufMgrLock and it doesn't seem worth it.
+ * It would be better to clear BM_JUST_DIRTIED right here, but we'd have
+ * to reacquire the BufMgrLock and it doesn't seem worth it.
*/
smgrwrite(reln,
buf->tag.blockNum,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.46 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/freelist.c,v 1.47 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* GUC variable: time in seconds between statistics reports */
-int DebugSharedBuffers = 0;
+int DebugSharedBuffers = 0;
/* Pointers to shared state */
-static BufferStrategyControl *StrategyControl = NULL;
-static BufferStrategyCDB *StrategyCDB = NULL;
+static BufferStrategyControl *StrategyControl = NULL;
+static BufferStrategyCDB *StrategyCDB = NULL;
/* Backend-local state about whether currently vacuuming */
-static bool strategy_hint_vacuum = false;
-static TransactionId strategy_vacuum_xid;
+static bool strategy_hint_vacuum = false;
+static TransactionId strategy_vacuum_xid;
#define T1_TARGET (StrategyControl->target_T1_size)
/*
* Macro to remove a CDB from whichever list it currently is on
*/
-#define STRAT_LIST_REMOVE(cdb) \
+#define STRAT_LIST_REMOVE(cdb) \
do { \
Assert((cdb)->list >= 0 && (cdb)->list < STRAT_NUM_LISTS); \
if ((cdb)->prev < 0) \
if (StrategyControl->listTail[(l)] < 0) \
{ \
(cdb)->prev = (cdb)->next = -1; \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
{ \
(cdb)->next = -1; \
(cdb)->prev = StrategyControl->listTail[(l)]; \
- StrategyCDB[StrategyControl->listTail[(l)]].next = \
+ StrategyCDB[StrategyControl->listTail[(l)]].next = \
((cdb) - StrategyCDB); \
- StrategyControl->listTail[(l)] = \
+ StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
StrategyControl->listSize[(l)]++; \
if (StrategyControl->listHead[(l)] < 0) \
{ \
(cdb)->prev = (cdb)->next = -1; \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
StrategyControl->listTail[(l)] = \
((cdb) - StrategyCDB); \
} \
{ \
(cdb)->prev = -1; \
(cdb)->next = StrategyControl->listHead[(l)]; \
- StrategyCDB[StrategyControl->listHead[(l)]].prev = \
+ StrategyCDB[StrategyControl->listHead[(l)]].prev = \
((cdb) - StrategyCDB); \
- StrategyControl->listHead[(l)] = \
+ StrategyControl->listHead[(l)] = \
((cdb) - StrategyCDB); \
} \
StrategyControl->listSize[(l)]++; \
if (StrategyControl->stat_report + DebugSharedBuffers < now)
{
- long all_hit, b1_hit, t1_hit, t2_hit, b2_hit;
- int id, t1_clean, t2_clean;
- ErrorContextCallback *errcxtold;
+ long all_hit,
+ b1_hit,
+ t1_hit,
+ t2_hit,
+ b2_hit;
+ int id,
+ t1_clean,
+ t2_clean;
+ ErrorContextCallback *errcxtold;
id = StrategyControl->listHead[STRAT_LIST_T1];
t1_clean = 0;
}
if (StrategyControl->num_lookup == 0)
- {
all_hit = b1_hit = t1_hit = t2_hit = b2_hit = 0;
- }
else
{
b1_hit = (StrategyControl->num_hit[STRAT_LIST_B1] * 100 /
StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
int *cdb_found_index)
{
- BufferStrategyCDB *cdb;
+ BufferStrategyCDB *cdb;
/* Optional stats printout */
if (DebugSharedBuffers > 0)
StrategyControl->num_hit[cdb->list]++;
/*
- * If this is a T2 hit, we simply move the CDB to the
- * T2 MRU position and return the found buffer.
+ * If this is a T2 hit, we simply move the CDB to the T2 MRU position
+ * and return the found buffer.
*
* A CDB in T2 cannot have t1_vacuum set, so we needn't check. However,
* if the current process is VACUUM then it doesn't promote to MRU.
}
/*
- * If this is a T1 hit, we move the buffer to the T2 MRU only if another
- * transaction had read it into T1, *and* neither transaction is a VACUUM.
- * This is required because any UPDATE or DELETE in PostgreSQL does
- * multiple ReadBuffer(), first during the scan, later during the
- * heap_update() or heap_delete(). Otherwise move to T1 MRU. VACUUM
- * doesn't even get to make that happen.
+ * If this is a T1 hit, we move the buffer to the T2 MRU only if
+ * another transaction had read it into T1, *and* neither transaction
+ * is a VACUUM. This is required because any UPDATE or DELETE in
+ * PostgreSQL does multiple ReadBuffer(), first during the scan, later
+ * during the heap_update() or heap_delete(). Otherwise move to T1
+ * MRU. VACUUM doesn't even get to make that happen.
*/
if (cdb->list == STRAT_LIST_T1)
{
{
STRAT_LIST_REMOVE(cdb);
STRAT_MRU_INSERT(cdb, STRAT_LIST_T1);
+
/*
- * If a non-VACUUM process references a page recently loaded
- * by VACUUM, clear the stigma; the state will now be the
- * same as if this process loaded it originally.
+ * If a non-VACUUM process references a page recently
+ * loaded by VACUUM, clear the stigma; the state will now
+ * be the same as if this process loaded it originally.
*/
if (cdb->t1_vacuum)
{
* adjust the T1target.
*
* Now for this really to end up as a B1 or B2 cache hit, we must have
- * been flushing for quite some time as the block not only must have been
- * read, but also traveled through the queue and evicted from the T cache
- * again already.
+ * been flushing for quite some time as the block not only must have
+ * been read, but also traveled through the queue and evicted from the
+ * T cache again already.
*
* VACUUM re-reads shouldn't adjust the target either.
*/
return NULL;
/*
- * Adjust the target size of the T1 cache depending on if this is
- * a B1 or B2 hit.
+ * Adjust the target size of the T1 cache depending on if this is a B1
+ * or B2 hit.
*/
switch (cdb->list)
{
case STRAT_LIST_B1:
+
/*
- * B1 hit means that the T1 cache is probably too
- * small. Adjust the T1 target size and continue
- * below.
+ * B1 hit means that the T1 cache is probably too small.
+ * Adjust the T1 target size and continue below.
*/
T1_TARGET = Min(T1_TARGET + Max(B2_LENGTH / B1_LENGTH, 1),
NBuffers);
break;
case STRAT_LIST_B2:
- /*
- * B2 hit means that the T2 cache is probably too
- * small. Adjust the T1 target size and continue
- * below.
+
+ /*
+ * B2 hit means that the T2 cache is probably too small.
+ * Adjust the T1 target size and continue below.
*/
T1_TARGET = Max(T1_TARGET - Max(B1_LENGTH / B2_LENGTH, 1), 0);
break;
}
/*
- * Even though we had seen the block in the past, its data is
- * not currently in memory ... cache miss to the bufmgr.
+ * Even though we had seen the block in the past, its data is not
+ * currently in memory ... cache miss to the bufmgr.
*/
return NULL;
}
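/*
 * Illustrative sketch, not part of this patch, of the ARC target adaptation
 * applied above on B1/B2 ghost-list hits: a B1 hit grows the T1 target, a B2
 * hit shrinks it, in each case by at least 1 and at most the ratio of the
 * ghost-list lengths, clamped to [0, NBuffers].  Plain ints stand in for the
 * strategy control fields; names are hypothetical.  (A hit on a ghost list
 * implies that list is non-empty, so the divisor is never zero.)
 */
#define MAX_OF(a, b)	((a) > (b) ? (a) : (b))
#define MIN_OF(a, b)	((a) < (b) ? (a) : (b))

static int
adjust_t1_target(int t1_target, int b1_len, int b2_len, int nbuffers,
				 int hit_was_in_b1)
{
	if (hit_was_in_b1)
	{
		/* B1 hit: the T1 cache is probably too small, grow the target */
		t1_target = MIN_OF(t1_target + MAX_OF(b2_len / b1_len, 1), nbuffers);
	}
	else
	{
		/* B2 hit: the T2 cache is probably too small, shrink the T1 target */
		t1_target = MAX_OF(t1_target - MAX_OF(b1_len / b2_len, 1), 0);
	}
	return t1_target;
}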
*
* Called by the bufmgr to get the next candidate buffer to use in
* BufferAlloc(). The only hard requirement BufferAlloc() has is that
- * this buffer must not currently be pinned.
+ * this buffer must not currently be pinned.
*
* *cdb_replace_index is set to the index of the candidate CDB, or -1 if
* none (meaning we are using a previously free buffer). This is not
BufferDesc *
StrategyGetBuffer(int *cdb_replace_index)
{
- int cdb_id;
- BufferDesc *buf;
+ int cdb_id;
+ BufferDesc *buf;
if (StrategyControl->listFreeBuffers < 0)
{
/* There is a completely free buffer available - take it */
/*
- * Note: This code uses the side effect that a free buffer
- * can never be pinned or dirty and therefore the call to
+ * Note: This code uses the side effect that a free buffer can
+ * never be pinned or dirty and therefore the call to
* StrategyReplaceBuffer() will happen without the bufmgr
- * releasing the bufmgr-lock in the meantime. That means,
- * that there will never be any reason to recheck. Otherwise
- * we would leak shared buffers here!
+ * releasing the bufmgr-lock in the meantime. That means, that
+ * there will never be any reason to recheck. Otherwise we would
+ * leak shared buffers here!
*/
*cdb_replace_index = -1;
buf = &BufferDescriptors[StrategyControl->listFreeBuffers];
StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
int cdb_found_index, int cdb_replace_index)
{
- BufferStrategyCDB *cdb_found;
- BufferStrategyCDB *cdb_replace;
+ BufferStrategyCDB *cdb_found;
+ BufferStrategyCDB *cdb_replace;
if (cdb_found_index >= 0)
{
/* Assert that the buffer remembered in cdb_found is the one */
/* the buffer manager is currently faulting in */
Assert(BUFFERTAGS_EQUAL(cdb_found->buf_tag, *newTag));
-
+
if (cdb_replace_index >= 0)
{
/* We are satisfying it with an evicted T buffer */
/* Assert that the buffer remembered in cdb_replace is */
/* the one the buffer manager has just evicted */
- Assert(cdb_replace->list == STRAT_LIST_T1 ||
+ Assert(cdb_replace->list == STRAT_LIST_T1 ||
cdb_replace->list == STRAT_LIST_T2);
Assert(cdb_replace->buf_id == buf->buf_id);
Assert(BUFFERTAGS_EQUAL(cdb_replace->buf_tag, buf->tag));
/*
- * Under normal circumstances we move the evicted T list entry to
- * the corresponding B list. However, T1 entries that exist only
- * because of VACUUM are just thrown into the unused list instead.
- * We don't expect them to be touched again by the VACUUM, and if
- * we put them into B1 then VACUUM would skew T1_target adjusting.
+ * Under normal circumstances we move the evicted T list entry
+ * to the corresponding B list. However, T1 entries that
+ * exist only because of VACUUM are just thrown into the
+ * unused list instead. We don't expect them to be touched
+ * again by the VACUUM, and if we put them into B1 then VACUUM
+ * would skew T1_target adjusting.
*/
if (cdb_replace->t1_vacuum)
{
else
{
/*
- * This was a complete cache miss, so we need to create
- * a new CDB. The goal is to keep T1len+B1len <= c.
+ * This was a complete cache miss, so we need to create a new CDB.
+ * The goal is to keep T1len+B1len <= c.
*/
if (B1_LENGTH > 0 && (T1_LENGTH + B1_LENGTH) >= NBuffers)
{
if (cdb_replace_index >= 0)
{
/*
- * The buffer was formerly in a T list, move its CDB
- * to the corresponding B list
+ * The buffer was formerly in a T list, move its CDB to the
+ * corresponding B list
*/
cdb_replace = &StrategyCDB[cdb_replace_index];
- Assert(cdb_replace->list == STRAT_LIST_T1 ||
+ Assert(cdb_replace->list == STRAT_LIST_T1 ||
cdb_replace->list == STRAT_LIST_T2);
Assert(cdb_replace->buf_id == buf->buf_id);
Assert(BUFFERTAGS_EQUAL(cdb_replace->buf_tag, buf->tag));
STRAT_MRU_INSERT(cdb_found, STRAT_LIST_T1);
/*
- * Remember the Xid when this buffer went onto T1 to avoid
- * a single UPDATE promoting a newcomer straight into T2.
- * Also remember if it was loaded for VACUUM.
+ * Remember the Xid when this buffer went onto T1 to avoid a
+ * single UPDATE promoting a newcomer straight into T2. Also
+ * remember if it was loaded for VACUUM.
*/
cdb_found->t1_xid = GetCurrentTransactionId();
cdb_found->t1_vacuum = strategy_hint_vacuum;
void
StrategyInvalidateBuffer(BufferDesc *buf)
{
- int cdb_id;
- BufferStrategyCDB *cdb;
+ int cdb_id;
+ BufferStrategyCDB *cdb;
/* The buffer cannot be dirty or pinned */
Assert(!(buf->flags & BM_DIRTY) || !(buf->flags & BM_VALID));
cdb = &StrategyCDB[cdb_id];
/*
- * Remove the CDB from the hashtable and the ARC queue it is
- * currently on.
+ * Remove the CDB from the hashtable and the ARC queue it is currently
+ * on.
*/
BufTableDelete(&(cdb->buf_tag));
STRAT_LIST_REMOVE(cdb);
/*
- * Clear out the CDB's buffer tag and association with the buffer
- * and add it to the list of unused CDB's
+ * Clear out the CDB's buffer tag and association with the buffer and
+ * add it to the list of unused CDB's
*/
CLEAR_BUFFERTAG(cdb->buf_tag);
cdb->buf_id = -1;
StrategyControl->listUnusedCDB = cdb_id;
/*
- * Clear out the buffer's tag and add it to the list of
- * currently unused buffers. We must do this to ensure that linear
- * scans of the buffer array don't think the buffer is valid.
+ * Clear out the buffer's tag and add it to the list of currently
+ * unused buffers. We must do this to ensure that linear scans of the
+ * buffer array don't think the buffer is valid.
*/
CLEAR_BUFFERTAG(buf->tag);
buf->flags &= ~(BM_VALID | BM_DIRTY);
StrategyDirtyBufferList(BufferDesc **buffers, BufferTag *buftags,
int max_buffers)
{
- int num_buffer_dirty = 0;
- int cdb_id_t1;
- int cdb_id_t2;
- int buf_id;
- BufferDesc *buf;
+ int num_buffer_dirty = 0;
+ int cdb_id_t1;
+ int cdb_id_t2;
+ int buf_id;
+ BufferDesc *buf;
/*
- * Traverse the T1 and T2 list LRU to MRU in "parallel"
- * and add all dirty buffers found in that order to the list.
- * The ARC strategy keeps all used buffers including pinned ones
- * in the T1 or T2 list. So we cannot miss any dirty buffers.
+ * Traverse the T1 and T2 list LRU to MRU in "parallel" and add all
+ * dirty buffers found in that order to the list. The ARC strategy
+ * keeps all used buffers including pinned ones in the T1 or T2 list.
+ * So we cannot miss any dirty buffers.
*/
cdb_id_t1 = StrategyControl->listHead[STRAT_LIST_T1];
cdb_id_t2 = StrategyControl->listHead[STRAT_LIST_T2];
void
StrategyInitialize(bool init)
{
- bool found;
- int i;
+ bool found;
+ int i;
/*
* Initialize the shared CDB lookup hashtable
Assert(init);
/*
- * Grab the whole linked list of free buffers for our strategy.
- * We assume it was previously set up by InitBufferPool().
+ * Grab the whole linked list of free buffers for our strategy. We
+ * assume it was previously set up by InitBufferPool().
*/
StrategyControl->listFreeBuffers = 0;
/*
- * We start off with a target T1 list size of
- * half the available cache blocks.
+ * We start off with a target T1 list size of half the available
+ * cache blocks.
*/
StrategyControl->target_T1_size = NBuffers / 2;
StrategyControl->listSize[i] = 0;
StrategyControl->num_hit[i] = 0;
}
- StrategyControl->num_lookup = 0;
+ StrategyControl->num_lookup = 0;
StrategyControl->stat_report = 0;
/*
StrategyControl->listUnusedCDB = 0;
}
else
- {
Assert(!init);
- }
}
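/*
 * Illustrative sketch, not part of this patch: the STRAT_LIST_REMOVE and
 * STRAT_*_INSERT macros earlier in freelist.c keep the cache directory
 * blocks on doubly linked lists threaded by array index, with -1 as the
 * "null" link; insertion at the tail or head selects the MRU or LRU end.
 * A stripped-down version of the same representation, hypothetical names:
 */
typedef struct
{
	int			prev;			/* index of previous node, or -1 */
	int			next;			/* index of next node, or -1 */
} ListNode;

typedef struct
{
	int			head;			/* index of first node, or -1 */
	int			tail;			/* index of last node, or -1 */
	int			size;
} ListHead;

static void
list_remove(ListNode *nodes, ListHead *list, int id)
{
	if (nodes[id].prev < 0)
		list->head = nodes[id].next;
	else
		nodes[nodes[id].prev].next = nodes[id].next;
	if (nodes[id].next < 0)
		list->tail = nodes[id].prev;
	else
		nodes[nodes[id].next].prev = nodes[id].prev;
	list->size--;
}

static void
list_insert_tail(ListNode *nodes, ListHead *list, int id)
{
	nodes[id].next = -1;
	nodes[id].prev = list->tail;
	if (list->tail < 0)
		list->head = id;		/* list was empty */
	else
		nodes[list->tail].next = id;
	list->tail = id;
	list->size++;
}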
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.58 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/buffer/localbuf.c,v 1.59 2004/08/29 05:06:47 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* allocate a local buffer. We do round robin allocation for now.
*
* API is similar to bufmgr.c's BufferAlloc, except that we do not need
- * to have the BufMgrLock since this is all local. Also, IO_IN_PROGRESS
+ * to have the BufMgrLock since this is all local. Also, IO_IN_PROGRESS
* does not get set.
*/
BufferDesc *
LocalRefCount[i]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
- BufferDescriptorGetBuffer(bufHdr));
+ BufferDescriptorGetBuffer(bufHdr));
if (bufHdr->flags & BM_VALID)
*foundPtr = TRUE;
else
bufHdr = &LocalBufferDescriptors[b];
LocalRefCount[b]++;
ResourceOwnerRememberBuffer(CurrentResourceOwner,
- BufferDescriptorGetBuffer(bufHdr));
+ BufferDescriptorGetBuffer(bufHdr));
nextFreeLocalBuf = (b + 1) % NLocBuffer;
break;
}
"local buffer leak: [%03d] (rel=%u/%u/%u, blockNum=%u, flags=0x%x, refcount=%u %d)",
i,
buf->tag.rnode.spcNode, buf->tag.rnode.dbNode,
- buf->tag.rnode.relNode, buf->tag.blockNum, buf->flags,
+ buf->tag.rnode.relNode, buf->tag.blockNum, buf->flags,
buf->refcount, LocalRefCount[i]);
LocalRefCount[i] = 0;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.111 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/file/fd.c,v 1.112 2004/08/29 05:06:47 momjian Exp $
*
* NOTES:
*
* and other code that tries to open files without consulting fd.c. This
* is the number left free. (While we can be pretty sure we won't get
* EMFILE, there's never any guarantee that we won't get ENFILE due to
- * other processes chewing up FDs. So it's a bad idea to try to open files
+ * other processes chewing up FDs. So it's a bad idea to try to open files
* without consulting fd.c. Nonetheless we cannot control all code.)
*
* Because this is just a fixed setting, we are effectively assuming that
* Note: the value of max_files_per_process is taken into account while
* setting this variable, and so need not be tested separately.
*/
-static int max_safe_fds = 32; /* default if not changed */
+static int max_safe_fds = 32; /* default if not changed */
/* Debugging.... */
*/
#define MAX_ALLOCATED_DESCS 32
-typedef enum {
+typedef enum
+{
AllocateDescFile,
AllocateDescDir
} AllocateDescKind;
-typedef struct {
- AllocateDescKind kind;
- union {
- FILE *file;
- DIR *dir;
- } desc;
+typedef struct
+{
+ AllocateDescKind kind;
+ union
+ {
+ FILE *file;
+ DIR *dir;
+ } desc;
TransactionId create_xid;
} AllocateDesc;
-static int numAllocatedDescs = 0;
+static int numAllocatedDescs = 0;
static AllocateDesc allocatedDescs[MAX_ALLOCATED_DESCS];
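/*
 * Illustrative sketch, not part of this patch: AllocateDesc above is a
 * tagged union, so cleanup code can issue the right close call for whichever
 * kind of descriptor was tracked.  A minimal stand-alone version of the same
 * idea, with hypothetical names:
 */
#include <stdio.h>
#include <dirent.h>

typedef enum
{
	DescFile,
	DescDir
} DescKind;

typedef struct
{
	DescKind	kind;
	union
	{
		FILE	   *file;
		DIR		   *dir;
	}			u;
} TrackedDesc;

static int
close_tracked(TrackedDesc *d)
{
	switch (d->kind)
	{
		case DescFile:
			return fclose(d->u.file);
		case DescDir:
			return closedir(d->u.dir);
	}
	return -1;					/* not reached */
}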
/*
/* dup until failure ... */
for (;;)
{
- int thisfd;
+ int thisfd;
thisfd = dup(0);
if (thisfd < 0)
pfree(fd);
/*
- * Return results. usable_fds is just the number of successful dups.
- * We assume that the system limit is highestfd+1 (remember 0 is a legal
- * FD number) and so already_open is highestfd+1 - usable_fds.
+ * Return results. usable_fds is just the number of successful dups.
+ * We assume that the system limit is highestfd+1 (remember 0 is a
+ * legal FD number) and so already_open is highestfd+1 - usable_fds.
*/
*usable_fds = used;
- *already_open = highestfd+1 - used;
+ *already_open = highestfd + 1 - used;
}
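/*
 * Illustrative sketch, not part of this patch: count_usable_fds() above
 * probes the descriptor limit by dup'ing fd 0 until failure, then assumes
 * the system limit is highestfd+1.  The second helper applies the
 * MIN(usable, max_files_per_process - already_open) - slop rule described in
 * the next hunk.  Names and the reserved-FD count are hypothetical.
 */
#include <unistd.h>

#define SKETCH_RESERVED_FDS	10	/* slop for files opened behind our back */

static void
sketch_count_usable_fds(int *usable_fds, int *already_open)
{
	int			fds[1000];		/* cap the probe, as max_files_per_process does */
	int			used = 0;
	int			highestfd = 0;

	while (used < (int) (sizeof(fds) / sizeof(fds[0])))
	{
		int			thisfd = dup(0);

		if (thisfd < 0)
			break;				/* ran out of descriptors */
		fds[used++] = thisfd;
		if (thisfd > highestfd)
			highestfd = thisfd;
	}

	*usable_fds = used;
	*already_open = highestfd + 1 - used;

	/* release the probes */
	while (used > 0)
		close(fds[--used]);
}

static int
sketch_max_safe_fds(int usable_fds, int already_open, int max_files_per_process)
{
	int			limit = max_files_per_process - already_open;

	if (usable_fds < limit)
		limit = usable_fds;
	return limit - SKETCH_RESERVED_FDS;
}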
/*
int already_open;
/*
- * We want to set max_safe_fds to
- * MIN(usable_fds, max_files_per_process - already_open)
- * less the slop factor for files that are opened without consulting
- * fd.c. This ensures that we won't exceed either max_files_per_process
- * or the experimentally-determined EMFILE limit.
+ * We want to set max_safe_fds to MIN(usable_fds,
+ * max_files_per_process - already_open) less the slop factor for
+ * files that are opened without consulting fd.c. This ensures that
+ * we won't exceed either max_files_per_process or the
+ * experimentally-determined EMFILE limit.
*/
count_usable_fds(&usable_fds, &already_open);
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedFiles[]; the test against max_safe_fds prevents AllocateFile
- * from hogging every one of the available FDs, which'd lead to infinite
- * looping.
+ * allocatedFiles[]; the test against max_safe_fds prevents
+ * AllocateFile from hogging every one of the available FDs, which'd
+ * lead to infinite looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
static int
FreeDesc(AllocateDesc *desc)
{
- int result;
+ int result;
/* Close the underlying object */
switch (desc->kind)
DIR *
AllocateDir(const char *dirname)
{
- DIR *dir;
+ DIR *dir;
DO_DB(elog(LOG, "AllocateDir: Allocated %d (%s)",
numAllocatedDescs, dirname));
/*
* The test against MAX_ALLOCATED_DESCS prevents us from overflowing
- * allocatedDescs[]; the test against max_safe_fds prevents AllocateDir
- * from hogging every one of the available FDs, which'd lead to infinite
- * looping.
+ * allocatedDescs[]; the test against max_safe_fds prevents
+ * AllocateDir from hogging every one of the available FDs, which'd
+ * lead to infinite looping.
*/
if (numAllocatedDescs >= MAX_ALLOCATED_DESCS ||
numAllocatedDescs >= max_safe_fds - 1)
void
AtEOSubXact_Files(bool isCommit, TransactionId myXid, TransactionId parentXid)
{
- Index i;
+ Index i;
if (SizeVfdCache > 0)
{
/* no PG_TEMP_FILES_DIR in DataDir in non EXEC_BACKEND case */
|| strcmp(db_de->d_name, "..") == 0
#endif
- )
+ )
continue;
snprintf(temp_path, sizeof(temp_path),
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.33 2004/08/29 04:12:47 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/freespace/freespace.c,v 1.34 2004/08/29 05:06:47 momjian Exp $
*
*
* NOTES:
int MaxFSMPages;
static FSMHeader *FreeSpaceMap; /* points to FSMHeader in shared memory */
-static HTAB *FreeSpaceMapRelHash; /* points to (what used to be) FSMHeader->relHash */
+static HTAB *FreeSpaceMapRelHash; /* points to (what used to be)
+ * FSMHeader->relHash */
static FSMRelation *lookup_fsm_rel(RelFileNode *rel);
{
HASHCTL info;
int nchunks;
- bool found;
+ bool found;
/* Create table header */
- FreeSpaceMap = (FSMHeader *) ShmemInitStruct("Free Space Map Header",sizeof(FSMHeader),&found);
+ FreeSpaceMap = (FSMHeader *) ShmemInitStruct("Free Space Map Header", sizeof(FSMHeader), &found);
if (FreeSpaceMap == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
info.hash = tag_hash;
FreeSpaceMapRelHash = ShmemInitHash("Free Space Map Hash",
- MaxFSMRelations / 10,
- MaxFSMRelations,
- &info,
- (HASH_ELEM | HASH_FUNCTION));
+ MaxFSMRelations / 10,
+ MaxFSMRelations,
+ &info,
+ (HASH_ELEM | HASH_FUNCTION));
if (!FreeSpaceMapRelHash)
ereport(FATAL,
/*
* It's possible that we have to move data down, not up, if the
- * allocations of previous rels expanded. This normally means that
- * our allocation expanded too (or at least got no worse), and
- * ditto for later rels. So there should be room to move all our
- * data down without dropping any --- but we might have to push down
- * following rels to acquire the room. We don't want to do the push
- * more than once, so pack everything against the end of the arena
- * if so.
+ * allocations of previous rels expanded. This normally means
+ * that our allocation expanded too (or at least got no worse),
+ * and ditto for later rels. So there should be room to move all
+ * our data down without dropping any --- but we might have to
+ * push down following rels to acquire the room. We don't want to
+ * do the push more than once, so pack everything against the end
+ * of the arena if so.
*
* In corner cases where we are on the short end of a roundoff choice
* that we were formerly on the long end of, it's possible that we
- * have to move down and compress our data too. In fact, even after
- * pushing down the following rels, there might not be as much space
- * as we computed for this rel above --- that would imply that some
- * following rel(s) are also on the losing end of roundoff choices.
- * We could handle this fairly by doing the per-rel compactions
- * out-of-order, but that seems like way too much complexity to deal
- * with a very infrequent corner case. Instead, we simply drop pages
- * from the end of the current rel's data until it fits.
+ * have to move down and compress our data too. In fact, even
+ * after pushing down the following rels, there might not be as
+ * much space as we computed for this rel above --- that would
+ * imply that some following rel(s) are also on the losing end of
+ * roundoff choices. We could handle this fairly by doing the
+ * per-rel compactions out-of-order, but that seems like way too
+ * much complexity to deal with a very infrequent corner case.
+ * Instead, we simply drop pages from the end of the current rel's
+ * data until it fits.
*/
if (newChunkIndex > oldChunkIndex)
{
{
/* uh-oh, forcibly cut the allocation to fit */
newAlloc = limitChunkIndex - newChunkIndex;
+
/*
- * If newAlloc < 0 at this point, we are moving the rel's
- * firstChunk into territory currently assigned to a later
- * rel. This is okay so long as we do not copy any data.
- * The rels will be back in nondecreasing firstChunk order
- * at completion of the compaction pass.
+ * If newAlloc < 0 at this point, we are moving the
+ * rel's firstChunk into territory currently assigned
+ * to a later rel. This is okay so long as we do not
+ * copy any data. The rels will be back in
+ * nondecreasing firstChunk order at completion of the
+ * compaction pass.
*/
if (newAlloc < 0)
newAlloc = 0;
relNum++;
fprintf(stderr, "Map %d: rel %u/%u/%u isIndex %d avgRequest %u lastPageCount %d nextPage %d\nMap= ",
relNum,
- fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
+ fsmrel->key.spcNode, fsmrel->key.dbNode, fsmrel->key.relNode,
(int) fsmrel->isIndex, fsmrel->avgRequest,
fsmrel->lastPageCount, fsmrel->nextPage);
if (fsmrel->isIndex)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.88 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipc.c,v 1.89 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ----------------------------------------------------------------
*/
void
-on_proc_exit(void (*function) (int code, Datum arg), Datum arg)
+ on_proc_exit(void (*function) (int code, Datum arg), Datum arg)
{
if (on_proc_exit_index >= MAX_ON_EXITS)
ereport(FATAL,
* ----------------------------------------------------------------
*/
void
-on_shmem_exit(void (*function) (int code, Datum arg), Datum arg)
+ on_shmem_exit(void (*function) (int code, Datum arg), Datum arg)
{
if (on_shmem_exit_index >= MAX_ON_EXITS)
ereport(FATAL,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.70 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/ipci.c,v 1.71 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int port)
{
PGShmemHeader *seghdr = NULL;
+
if (!IsUnderPostmaster)
{
- int size;
- int numSemas;
+ int size;
+ int numSemas;
/*
* Size of the Postgres shared-memory block is estimated via
- * moderately-accurate estimates for the big hogs, plus 100K for the
- * stuff that's too small to bother with estimating.
+ * moderately-accurate estimates for the big hogs, plus 100K for
+ * the stuff that's too small to bother with estimating.
*/
size = hash_estimate_size(SHMEM_INDEX_SIZE, sizeof(ShmemIndexEnt));
size += BufferShmemSize();
else
{
/*
- * Attach to the shmem segment.
- * (this should only ever be reached by EXEC_BACKEND code,
- * and only then with makePrivate == false)
+ * Attach to the shmem segment. (this should only ever be reached
+ * by EXEC_BACKEND code, and only then with makePrivate == false)
*/
#ifdef EXEC_BACKEND
Assert(!makePrivate);
BgWriterShmemInit();
#ifdef EXEC_BACKEND
+
/*
* Alloc the win32 shared backend array
*/
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.16 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/pmsignal.c,v 1.17 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
PMSignalInit(void)
{
- bool found;
+ bool found;
+
PMSignalFlags = (sig_atomic_t *)
- ShmemInitStruct("PMSignalFlags",NUM_PMSIGNALS * sizeof(sig_atomic_t),&found);
+ ShmemInitStruct("PMSignalFlags", NUM_PMSIGNALS * sizeof(sig_atomic_t), &found);
if (!found)
MemSet(PMSignalFlags, 0, NUM_PMSIGNALS * sizeof(sig_atomic_t));
else
{
/*
- * Use kill() to see if the postmaster is still alive. This can
- * sometimes give a false positive result, since the postmaster's PID
- * may get recycled, but it is good enough for existing uses by
- * indirect children.
+ * Use kill() to see if the postmaster is still alive. This can
+ * sometimes give a false positive result, since the postmaster's
+ * PID may get recycled, but it is good enough for existing uses
+ * by indirect children.
*/
return (kill(PostmasterPid, 0) == 0);
}
-#else /* WIN32 */
+#else /* WIN32 */
return (WaitForSingleObject(PostmasterHandle, 0) == WAIT_TIMEOUT);
-#endif /* WIN32 */
+#endif /* WIN32 */
}
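/*
 * Illustrative sketch, not part of this patch: kill(pid, 0) sends no signal
 * but still performs the existence check, which is what the non-WIN32 branch
 * above relies on.  Helper name is hypothetical; EPERM means the process
 * exists but belongs to someone else.
 */
#include <signal.h>
#include <errno.h>
#include <sys/types.h>

static int
process_exists(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;				/* exists, and we may signal it */
	return (errno == EPERM);	/* exists, but owned by another user */
}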
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.79 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.80 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static SHMEM_OFFSET ShmemEnd; /* end+1 address of shared memory */
-NON_EXEC_STATIC slock_t *ShmemLock; /* spinlock for shared memory allocation */
+NON_EXEC_STATIC slock_t *ShmemLock; /* spinlock for shared memory
+ * allocation */
NON_EXEC_STATIC slock_t *ShmemIndexLock; /* spinlock for ShmemIndex */
-NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually allocated for ShmemIndex */
+NON_EXEC_STATIC void *ShmemIndexAlloc = NULL; /* Memory actually
+ * allocated for
+ * ShmemIndex */
static HTAB *ShmemIndex = NULL; /* primary index hashtable for shmem */
* Initialize ShmemVariableCache for transaction manager.
*/
ShmemVariableCache = (VariableCache)
- ShmemAlloc(sizeof(*ShmemVariableCache));
+ ShmemAlloc(sizeof(*ShmemVariableCache));
memset(ShmemVariableCache, 0, sizeof(*ShmemVariableCache));
}
}
else
{
/*
- * If the shmem index doesn't exist, we are bootstrapping: we must
- * be trying to init the shmem index itself.
+ * If the shmem index doesn't exist, we are bootstrapping: we
+ * must be trying to init the shmem index itself.
*
- * Notice that the ShmemIndexLock is held until the shmem index has
- * been completely initialized.
+ * Notice that the ShmemIndexLock is held until the shmem index
+ * has been completely initialized.
*/
Assert(strcmp(name, "ShmemIndex") == 0);
Assert(ShmemBootstrap);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.71 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinval.c,v 1.72 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void DisplayXidCache(int code, Datum arg);
-#else /* !XIDCACHE_DEBUG */
+#else /* !XIDCACHE_DEBUG */
#define xc_by_recent_xmin_inc() ((void) 0)
#define xc_by_main_xid_inc() ((void) 0)
#define xc_by_child_xid_inc() ((void) 0)
#define xc_slow_answer_inc() ((void) 0)
-
-#endif /* XIDCACHE_DEBUG */
+#endif /* XIDCACHE_DEBUG */
/*
* Because backends sitting idle will not be reading sinval events, we
* need a way to give an idle backend a swift kick in the rear and make
* it catch up before the sinval queue overflows and forces everyone
- * through a cache reset exercise. This is done by broadcasting SIGUSR1
+ * through a cache reset exercise. This is done by broadcasting SIGUSR1
* to all backends when the queue is threatening to become full.
*
* State for catchup events consists of two flags: one saying whether
#ifdef XIDCACHE_DEBUG
on_proc_exit(DisplayXidCache, (Datum) 0);
-#endif /* XIDCACHE_DEBUG */
+#endif /* XIDCACHE_DEBUG */
}
/*
for (;;)
{
/*
- * We can discard any pending catchup event, since we will not exit
- * this loop until we're fully caught up.
+ * We can discard any pending catchup event, since we will not
+ * exit this loop until we're fully caught up.
*/
catchupInterruptOccurred = 0;
*
* If we are idle (catchupInterruptEnabled is set), we can safely
* invoke ProcessCatchupEvent directly. Otherwise, just set a flag
- * to do it later. (Note that it's quite possible for normal processing
+ * to do it later. (Note that it's quite possible for normal processing
* of the current transaction to cause ReceiveSharedInvalidMessages()
* to be run later on; in that case the flag will get cleared again,
* since there's no longer any reason to do anything.)
* EnableCatchupInterrupt
*
* This is called by the PostgresMain main loop just before waiting
- * for a frontend command. We process any pending catchup events,
+ * for a frontend command. We process any pending catchup events,
* and enable the signal handler to process future events directly.
*
* NOTE: the signal handler starts out disabled, and stays so until
* then test the occurred flag. If we see an unserviced interrupt has
* occurred, we re-clear the enable flag before going off to do the
* service work. (That prevents re-entrant invocation of
- * ProcessCatchupEvent() if another interrupt occurs.) If an
- * interrupt comes in between the setting and clearing of
- * catchupInterruptEnabled, then it will have done the service work and
- * left catchupInterruptOccurred zero, so we have to check again after
- * clearing enable. The whole thing has to be in a loop in case
+ * ProcessCatchupEvent() if another interrupt occurs.) If an interrupt
+ * comes in between the setting and clearing of
+ * catchupInterruptEnabled, then it will have done the service work
+ * and left catchupInterruptOccurred zero, so we have to check again
+ * after clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no
* unserviced interrupt.
break;
catchupInterruptEnabled = 0;
if (catchupInterruptOccurred)
- {
ProcessCatchupEvent();
- }
}
}
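/*
 * Illustrative sketch, not part of this patch, of the enable/re-check
 * protocol described in the comment above: the "occurred" flag is tested
 * again after clearing the "enabled" flag, so an event arriving between the
 * two assignments is never lost, and the handler cannot re-enter the service
 * routine.  Flag and function names are hypothetical stand-ins.
 */
#include <signal.h>

static volatile sig_atomic_t event_enabled = 0;
static volatile sig_atomic_t event_occurred = 0;

extern void service_event(void);	/* stand-in for ProcessCatchupEvent() */

static void
event_signal_handler(int signo)
{
	(void) signo;
	if (event_enabled)
	{
		/* idle: service immediately, guarding against re-entry */
		event_enabled = 0;
		service_event();
		event_enabled = 1;
	}
	else
		event_occurred = 1;		/* busy: remember it for later */
}

static void
enable_event_processing(void)
{
	for (;;)
	{
		event_enabled = 1;
		if (!event_occurred)
			break;
		event_enabled = 0;
		if (event_occurred)
		{
			event_occurred = 0;
			service_event();
		}
	}
}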
* DisableCatchupInterrupt
*
* This is called by the PostgresMain main loop just after receiving
- * a frontend command. Signal handler execution of catchup events
+ * a frontend command. Signal handler execution of catchup events
* is disabled until the next EnableCatchupInterrupt call.
*
* The SIGUSR2 signal handler also needs to call this, so as to
bool
DisableCatchupInterrupt(void)
{
- bool result = (catchupInterruptEnabled != 0);
+ bool result = (catchupInterruptEnabled != 0);
catchupInterruptEnabled = 0;
static void
ProcessCatchupEvent(void)
{
- bool notify_enabled;
+ bool notify_enabled;
/* Must prevent SIGUSR2 interrupt while I am running */
notify_enabled = DisableNotifyInterrupt();
/*
- * What we need to do here is cause ReceiveSharedInvalidMessages()
- * to run, which will do the necessary work and also reset the
- * catchupInterruptOccurred flag. If we are inside a transaction
- * we can just call AcceptInvalidationMessages() to do this. If we
+ * What we need to do here is cause ReceiveSharedInvalidMessages() to
+ * run, which will do the necessary work and also reset the
+ * catchupInterruptOccurred flag. If we are inside a transaction we
+ * can just call AcceptInvalidationMessages() to do this. If we
* aren't, we start and immediately end a transaction; the call to
* AcceptInvalidationMessages() happens down inside transaction start.
*
* It is awfully tempting to just call AcceptInvalidationMessages()
* without the rest of the xact start/stop overhead, and I think that
- * would actually work in the normal case; but I am not sure that things
- * would clean up nicely if we got an error partway through.
+ * would actually work in the normal case; but I am not sure that
+ * things would clean up nicely if we got an error partway through.
*/
if (IsTransactionOrTransactionBlock())
{
* We can find this out cheaply too.
*
* 3. Search the SubTrans tree to find the Xid's topmost parent, and then
- * see if that is running according to PGPROC. This is the slowest, but
+ * see if that is running according to PGPROC. This is the slowest, but
* sadly it has to be done always if the other two failed, unless we see
* that the cached subxact sets are complete (none have overflowed).
*
* SInvalLock has to be held while we do 1 and 2. If we save the top Xids
- * while doing 1, we can release the SInvalLock while we do 3. This buys back
+ * while doing 1, we can release the SInvalLock while we do 3. This buys back
* some concurrency (we can't retrieve the main Xids from PGPROC again anyway;
* see GetNewTransactionId).
*/
bool
TransactionIdIsInProgress(TransactionId xid)
{
- bool result = false;
- SISeg *segP = shmInvalBuffer;
- ProcState *stateP = segP->procState;
- int i,
- j;
- int nxids = 0;
- TransactionId *xids;
- TransactionId topxid;
- bool locked;
+ bool result = false;
+ SISeg *segP = shmInvalBuffer;
+ ProcState *stateP = segP->procState;
+ int i,
+ j;
+ int nxids = 0;
+ TransactionId *xids;
+ TransactionId topxid;
+ bool locked;
/*
* Don't bother checking a very old transaction.
}
/*
- * We can ignore main Xids that are younger than the target Xid,
- * since the target could not possibly be their child.
+ * We can ignore main Xids that are younger than the target
+ * Xid, since the target could not possibly be their child.
*/
if (TransactionIdPrecedes(xid, pxid))
continue;
}
/*
- * Save the main Xid for step 3. We only need to remember main
- * Xids that have uncached children. (Note: there is no race
- * condition here because the overflowed flag cannot be cleared,
- * only set, while we hold SInvalLock. So we can't miss an Xid
- * that we need to worry about.)
+ * Save the main Xid for step 3. We only need to remember
+ * main Xids that have uncached children. (Note: there is no
+ * race condition here because the overflowed flag cannot be
+ * cleared, only set, while we hold SInvalLock. So we can't
+ * miss an Xid that we need to worry about.)
*/
if (proc->subxids.overflowed)
xids[nxids++] = pxid;
locked = false;
/*
- * If none of the relevant caches overflowed, we know the Xid is
- * not running without looking at pg_subtrans.
+ * If none of the relevant caches overflowed, we know the Xid is not
+ * running without looking at pg_subtrans.
*/
if (nxids == 0)
goto result_known;
/*
* Step 3: have to check pg_subtrans.
*
- * At this point, we know it's either a subtransaction of one of the
- * Xids in xids[], or it's not running. If it's an already-failed
- * subtransaction, we want to say "not running" even though its parent may
- * still be running. So first, check pg_clog to see if it's been aborted.
+ * At this point, we know it's either a subtransaction of one of the Xids
+ * in xids[], or it's not running. If it's an already-failed
+ * subtransaction, we want to say "not running" even though its parent
+ * may still be running. So first, check pg_clog to see if it's been
+ * aborted.
*/
xc_slow_answer_inc();
goto result_known;
/*
- * It isn't aborted, so check whether the transaction tree it
- * belongs to is still running (or, more precisely, whether it
- * was running when this routine started -- note that we already
- * released SInvalLock).
+ * It isn't aborted, so check whether the transaction tree it belongs
+ * to is still running (or, more precisely, whether it was running
+ * when this routine started -- note that we already released
+ * SInvalLock).
*/
topxid = SubTransGetTopmostTransaction(xid);
Assert(TransactionIdIsValid(topxid));
int index;
/*
- * Normally we start the min() calculation with our own XID. But
- * if called by checkpointer, we will not be inside a transaction,
- * so use next XID as starting point for min() calculation. (Note
- * that if there are no xacts running at all, that will be the subtrans
+ * Normally we start the min() calculation with our own XID. But if
+ * called by checkpointer, we will not be inside a transaction, so use
+ * next XID as starting point for min() calculation. (Note that if
+ * there are no xacts running at all, that will be the subtrans
* truncation point!)
*/
if (IsTransactionState())
* lastBackend would be sufficient. But it seems better to do the
* malloc while not holding the lock, so we can't look at lastBackend.
*
- * This does open a possibility for avoiding repeated malloc/free:
- * since MaxBackends does not change at runtime, we can simply reuse
- * the previous xip array if any. (This relies on the fact that all
+ * This does open a possibility for avoiding repeated malloc/free: since
+ * MaxBackends does not change at runtime, we can simply reuse the
+ * previous xip array if any. (This relies on the fact that all
* callers pass static SnapshotData structs.)
*/
if (snapshot->xip == NULL)
return (urec);
}
-#endif /* NOT_USED */
+#endif /* NOT_USED */
/*
* BackendIdGetProc - given a BackendId, find its PGPROC structure
* XidCacheRemoveRunningXids
*
* Remove a bunch of TransactionIds from the list of known-running
- * subtransactions for my backend. Both the specified xid and those in
+ * subtransactions for my backend. Both the specified xid and those in
* the xids[] array (of length nxids) are removed from the subxids cache.
*/
void
XidCacheRemoveRunningXids(TransactionId xid, int nxids, TransactionId *xids)
{
- int i, j;
+ int i,
+ j;
Assert(!TransactionIdEquals(xid, InvalidTransactionId));
/*
* We must hold SInvalLock exclusively in order to remove transactions
- * from the PGPROC array. (See notes in GetSnapshotData.) It's
+ * from the PGPROC array. (See notes in GetSnapshotData.) It's
* possible this could be relaxed since we know this routine is only
* used to abort subtransactions, but pending closer analysis we'd
* best be conservative.
LWLockAcquire(SInvalLock, LW_EXCLUSIVE);
/*
- * Under normal circumstances xid and xids[] will be in increasing order,
- * as will be the entries in subxids. Scan backwards to avoid O(N^2)
- * behavior when removing a lot of xids.
+ * Under normal circumstances xid and xids[] will be in increasing
+ * order, as will be the entries in subxids. Scan backwards to avoid
+ * O(N^2) behavior when removing a lot of xids.
*/
for (i = nxids - 1; i >= 0; i--)
{
- TransactionId anxid = xids[i];
+ TransactionId anxid = xids[i];
for (j = MyProc->subxids.nxids - 1; j >= 0; j--)
{
DisplayXidCache(int code, Datum arg)
{
fprintf(stderr,
- "XidCache: xmin: %ld, mainxid: %ld, childxid: %ld, slow: %ld\n",
+ "XidCache: xmin: %ld, mainxid: %ld, childxid: %ld, slow: %ld\n",
xc_by_recent_xmin,
xc_by_main_xid,
xc_by_child_xid,
xc_slow_answer);
}
-#endif /* XIDCACHE_DEBUG */
+#endif /* XIDCACHE_DEBUG */
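The "scan backwards to avoid O(N^2) behavior" comment in XidCacheRemoveRunningXids is easier to see in isolation. A standalone sketch of the same pattern with plain arrays (DemoXid and remove_known_xids are made-up names; the backend works on the PGPROC subxids cache instead):

typedef unsigned int DemoXid;	/* stand-in for TransactionId */

/*
 * Remove every value in todel[] from cache[].  Both arrays are normally in
 * increasing order, so walking them from the high end keeps each inner
 * search short; the hole left by a removal is plugged with the last live
 * entry, just as the subxids cache does.
 */
static void
remove_known_xids(DemoXid *cache, int *ncache,
				  const DemoXid *todel, int ndel)
{
	int			i,
				j;

	for (i = ndel - 1; i >= 0; i--)
	{
		for (j = *ncache - 1; j >= 0; j--)
		{
			if (cache[j] == todel[i])
			{
				cache[j] = cache[*ncache - 1];
				(*ncache)--;
				break;
			}
		}
	}
}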
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.56 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/ipc/sinvaladt.c,v 1.57 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int segSize;
SISeg *segP;
int i;
- bool found;
+ bool found;
/* Allocate space in shared memory */
segSize = SInvalShmemSize(maxBackends);
- shmInvalBuffer = segP = (SISeg *) ShmemInitStruct("shmInvalBuffer",segSize,&found);
+ shmInvalBuffer = segP = (SISeg *) ShmemInitStruct("shmInvalBuffer", segSize, &found);
if (found)
return;
/*
* Try to prevent table overflow. When the table is 70% full send a
* WAKEN_CHILDREN request to the postmaster. The postmaster will send
- * a SIGUSR1 signal to all the backends, which will cause sinval.c
- * to read any pending SI entries.
+ * a SIGUSR1 signal to all the backends, which will cause sinval.c to
+ * read any pending SI entries.
*
* This should never happen if all the backends are actively executing
* queries, but if a backend is sitting idle then it won't be starting
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.105 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/large_object/inv_api.c,v 1.106 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
retval->offset = 0;
if (flags & INV_WRITE)
- {
retval->flags = IFS_WRLOCK | IFS_RDLOCK;
- }
else if (flags & INV_READ)
- {
retval->flags = IFS_RDLOCK;
- }
else
elog(ERROR, "invalid flags: %d", flags);
retval->offset = 0;
if (flags & INV_WRITE)
- {
retval->flags = IFS_WRLOCK | IFS_RDLOCK;
- }
else if (flags & INV_READ)
- {
retval->flags = IFS_RDLOCK;
- }
else
elog(ERROR, "invalid flags: %d", flags);
int
inv_drop(Oid lobjId)
{
- Oid classoid;
+ Oid classoid;
LargeObjectDrop(lobjId);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.68 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lmgr.c,v 1.69 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
};
-static LOCKMETHODID LockTableId = INVALID_LOCKMETHOD;
+static LOCKMETHODID LockTableId = INVALID_LOCKMETHOD;
/*
* Create the lock table described by LockConflicts
void
InitLockTable(int maxBackends)
{
- LOCKMETHODID LongTermTableId;
+ LOCKMETHODID LongTermTableId;
/* there's no zero-th table */
NumLockMethods = 1;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.138 2004/08/29 04:12:48 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lock.c,v 1.139 2004/08/29 05:06:48 momjian Exp $
*
* NOTES
* Outside modules can create a lock table and acquire/release
* map from lock method id to the lock table data structures
*/
static LockMethod LockMethods[MAX_LOCK_METHODS];
-static HTAB *LockMethodLockHash[MAX_LOCK_METHODS];
-static HTAB *LockMethodProcLockHash[MAX_LOCK_METHODS];
-static HTAB *LockMethodLocalHash[MAX_LOCK_METHODS];
+static HTAB *LockMethodLockHash[MAX_LOCK_METHODS];
+static HTAB *LockMethodProcLockHash[MAX_LOCK_METHODS];
+static HTAB *LockMethodLocalHash[MAX_LOCK_METHODS];
/* exported so lmgr.c can initialize it */
-int NumLockMethods;
+int NumLockMethods;
/* private state for GrantAwaitedLock */
static ResourceOwner awaitedOwner;
-static const char * const lock_mode_names[] =
+static const char *const lock_mode_names[] =
{
"INVALID",
"AccessShareLock",
|| (Trace_lock_table && (((LOCK *) MAKE_PTR(proclockP->tag.lock))->tag.relId == Trace_lock_table))
)
elog(LOG,
- "%s: proclock(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%x)",
+ "%s: proclock(%lx) lock(%lx) tbl(%d) proc(%lx) xid(%u) hold(%x)",
where, MAKE_OFFSET(proclockP), proclockP->tag.lock,
PROCLOCK_LOCKMETHOD(*(proclockP)),
proclockP->tag.proc, proclockP->tag.xid,
#define LOCK_PRINT(where, lock, type)
#define PROCLOCK_PRINT(where, proclockP)
-
#endif /* not LOCK_DEBUG */
static void RemoveLocalLock(LOCALLOCK *locallock);
static void GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner);
static int WaitOnLock(LOCKMETHODID lockmethodid, LOCALLOCK *locallock,
- ResourceOwner owner);
+ ResourceOwner owner);
static void LockCountMyLocks(SHMEM_OFFSET lockOffset, PGPROC *proc,
int *myHolding);
LockMethod
GetLocksMethodTable(LOCK *lock)
{
- LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
+ LOCKMETHODID lockmethodid = LOCK_LOCKMETHOD(*lock);
Assert(0 < lockmethodid && lockmethodid < NumLockMethods);
return LockMethods[lockmethodid];
if (numModes >= MAX_LOCKMODES)
elog(ERROR, "too many lock types %d (limit is %d)",
- numModes, MAX_LOCKMODES-1);
+ numModes, MAX_LOCKMODES - 1);
/* Compute init/max size to request for lock hashtables */
max_table_size = NLOCKENTS(maxBackends);
sprintf(shmemName, "%s (lock hash)", tabName);
LockMethodLockHash[lockmethodid] = ShmemInitHash(shmemName,
- init_table_size,
- max_table_size,
- &info,
- hash_flags);
+ init_table_size,
+ max_table_size,
+ &info,
+ hash_flags);
if (!LockMethodLockHash[lockmethodid])
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
sprintf(shmemName, "%s (proclock hash)", tabName);
LockMethodProcLockHash[lockmethodid] = ShmemInitHash(shmemName,
- init_table_size,
- max_table_size,
- &info,
- hash_flags);
+ init_table_size,
+ max_table_size,
+ &info,
+ hash_flags);
if (!LockMethodProcLockHash[lockmethodid])
elog(FATAL, "could not initialize lock table \"%s\"", tabName);
/*
- * allocate a non-shared hash table for LOCALLOCK structs. This is used
- * to store lock counts and resource owner information.
+ * allocate a non-shared hash table for LOCALLOCK structs. This is
+ * used to store lock counts and resource owner information.
*
* The non-shared table could already exist in this process (this occurs
- * when the postmaster is recreating shared memory after a backend crash).
- * If so, delete and recreate it. (We could simply leave it, since it
- * ought to be empty in the postmaster, but for safety let's zap it.)
+ * when the postmaster is recreating shared memory after a backend
+ * crash). If so, delete and recreate it. (We could simply leave it,
+ * since it ought to be empty in the postmaster, but for safety let's
+ * zap it.)
*/
if (LockMethodLocalHash[lockmethodid])
hash_destroy(LockMethodLocalHash[lockmethodid]);
LOCKMETHODID
LockMethodTableRename(LOCKMETHODID lockmethodid)
{
- LOCKMETHODID newLockMethodId;
+ LOCKMETHODID newLockMethodId;
if (NumLockMethods >= MAX_LOCK_METHODS)
return INVALID_LOCKMETHOD;
/*
* Find or create a LOCALLOCK entry for this lock and lockmode
*/
- MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
+ MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
localtag.lock = *locktag;
localtag.xid = xid;
localtag.mode = lockmode;
locallock->lockOwners = NULL;
locallock->lockOwners = (LOCALLOCKOWNER *)
MemoryContextAlloc(TopMemoryContext,
- locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
+ locallock->maxLockOwners * sizeof(LOCALLOCKOWNER));
}
else
{
/* Make sure there will be room to remember the lock */
if (locallock->numLockOwners >= locallock->maxLockOwners)
{
- int newsize = locallock->maxLockOwners * 2;
+ int newsize = locallock->maxLockOwners * 2;
locallock->lockOwners = (LOCALLOCKOWNER *)
repalloc(locallock->lockOwners,
}
/*
- * If we already hold the lock, we can just increase the count locally.
+ * If we already hold the lock, we can just increase the count
+ * locally.
*/
if (locallock->nLocks > 0)
{
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->lock = lock;
/*
* Create the hash key for the proclock table.
*/
- MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
+ MemSet(&proclocktag, 0, sizeof(PROCLOCKTAG)); /* must clear padding */
proclocktag.lock = MAKE_OFFSET(lock);
proclocktag.proc = MAKE_OFFSET(MyProc);
TransactionIdStore(xid, &proclocktag.xid);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory"),
- errhint("You may need to increase max_locks_per_transaction.")));
+ errhint("You may need to increase max_locks_per_transaction.")));
}
locallock->proclock = proclock;
Assert((lock->nRequested > 0) && (lock->requested[lockmode] > 0));
/*
- * If this process (under any XID) is a holder of the lock, just
- * grant myself another one without blocking.
+ * If this process (under any XID) is a holder of the lock, just grant
+ * myself another one without blocking.
*/
LockCountMyLocks(proclock->tag.lock, MyProc, myHolding);
if (myHolding[lockmode] > 0)
SHMQueueDelete(&proclock->lockLink);
SHMQueueDelete(&proclock->procLink);
proclock = (PROCLOCK *) hash_search(LockMethodProcLockHash[lockmethodid],
- (void *) &(proclock->tag),
+ (void *) &(proclock->tag),
HASH_REMOVE, NULL);
if (!proclock)
elog(WARNING, "proclock table corrupted");
* Construct bitmask of locks this process holds on this object.
*/
{
- LOCKMASK heldLocks = 0;
+ LOCKMASK heldLocks = 0;
for (i = 1; i <= lockMethodTable->numLockModes; i++)
{
GrantLockLocal(LOCALLOCK *locallock, ResourceOwner owner)
{
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
- int i;
+ int i;
Assert(locallock->numLockOwners < locallock->maxLockOwners);
/* Count the total */
/*
* Find the LOCALLOCK entry for this lock and lockmode
*/
- MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
+ MemSet(&localtag, 0, sizeof(localtag)); /* must clear padding */
localtag.lock = *locktag;
localtag.xid = xid;
localtag.mode = lockmode;
{
LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
ResourceOwner owner;
- int i;
+ int i;
/* Session locks and user locks are not transactional */
if (xid != InvalidTransactionId &&
}
/*
- * Decrease the total local count. If we're still holding the lock,
+ * Decrease the total local count. If we're still holding the lock,
* we're done.
*/
locallock->nLocks--;
PROCLOCK_PRINT("LockRelease: found", proclock);
/*
- * Double-check that we are actually holding a lock of the type we want to
- * release.
+ * Double-check that we are actually holding a lock of the type we
+ * want to release.
*/
if (!(proclock->holdMask & LOCKBIT_ON(lockmode)))
{
if (lock->nRequested == 0)
{
/*
- * We've just released the last lock, so garbage-collect the
- * lock object.
+ * We've just released the last lock, so garbage-collect the lock
+ * object.
*/
Assert(SHMQueueEmpty(&(lock->procLocks)));
lock = (LOCK *) hash_search(LockMethodLockHash[lockmethodid],
LockMethod lockMethodTable;
int i,
numLockModes;
- LOCALLOCK *locallock;
+ LOCALLOCK *locallock;
PROCLOCK *proclock;
LOCK *lock;
/*
* First we run through the locallock table and get rid of unwanted
* entries, then we scan the process's proclocks and get rid of those.
- * We do this separately because we may have multiple locallock entries
- * pointing to the same proclock, and we daren't end up with any
- * dangling pointers.
+ * We do this separately because we may have multiple locallock
+ * entries pointing to the same proclock, and we daren't end up with
+ * any dangling pointers.
*/
hash_seq_init(&status, LockMethodLocalHash[lockmethodid]);
if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
continue;
- /* Ignore locks with Xid=0 unless we are asked to release all locks */
+ /*
+ * Ignore locks with Xid=0 unless we are asked to release all
+ * locks
+ */
if (TransactionIdEquals(locallock->tag.xid, InvalidTransactionId)
&& !allxids)
continue;
if (LOCK_LOCKMETHOD(*lock) != lockmethodid)
goto next_item;
- /* Ignore locks with Xid=0 unless we are asked to release all locks */
+ /*
+ * Ignore locks with Xid=0 unless we are asked to release all
+ * locks
+ */
if (TransactionIdEquals(proclock->tag.xid, InvalidTransactionId)
&& !allxids)
goto next_item;
LockReleaseCurrentOwner(void)
{
HASH_SEQ_STATUS status;
- LOCALLOCK *locallock;
+ LOCALLOCK *locallock;
LOCALLOCKOWNER *lockOwners;
int i;
{
ResourceOwner parent = ResourceOwnerGetParent(CurrentResourceOwner);
HASH_SEQ_STATUS status;
- LOCALLOCK *locallock;
+ LOCALLOCK *locallock;
LOCALLOCKOWNER *lockOwners;
Assert(parent != NULL);
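A recurring detail in the lock.c hunks is the MemSet() with a "must clear padding" note before a tag struct is used as a hash key: the hash table hashes and compares the key as raw bytes, so compiler-inserted padding must be zeroed or two logically equal keys may never match. A tiny illustration of the pattern; MemSet and hash_search are the backend facilities already used above, while DemoTag and demo_build_key are hypothetical:

typedef struct DemoTag
{
	unsigned int relid;			/* 4 bytes */
	char		mode;			/* 1 byte; the compiler pads the struct */
} DemoTag;

static void
demo_build_key(DemoTag *key, unsigned int relid, char mode)
{
	/* zero everything first so byte-wise hashing/comparison sees no garbage */
	MemSet(key, 0, sizeof(DemoTag));
	key->relid = relid;
	key->mode = mode;
	/* *key can now be handed to hash_search() as a lookup key */
}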
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.23 2004/08/29 04:12:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.24 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
bool
LWLockHeldByMe(LWLockId lockid)
{
- int i;
+ int i;
for (i = 0; i < num_held_lwlocks; i++)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.152 2004/08/29 04:12:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/proc.c,v 1.153 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
InitProcGlobal(int maxBackends)
{
- bool foundProcGlobal, foundDummy;
+ bool foundProcGlobal,
+ foundDummy;
/* Create or attach to the ProcGlobal shared structure */
ProcGlobal = (PROC_HDR *)
void
InitDummyProcess(int proctype)
{
- PGPROC *dummyproc;
+ PGPROC *dummyproc;
/*
* ProcGlobal should be set by a previous call to InitProcGlobal (we
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
- * granted us the lock, or perhaps they detected a deadlock.
- * If they did grant us the lock, we'd better remember it in
- * our local lock table.
+ * granted us the lock, or perhaps they detected a deadlock. If
+ * they did grant us the lock, we'd better remember it in our
+ * local lock table.
*/
if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
static void
DummyProcKill(int code, Datum arg)
{
- int proctype = DatumGetInt32(arg);
- PGPROC *dummyproc;
+ int proctype = DatumGetInt32(arg);
+ PGPROC *dummyproc;
Assert(proctype >= 0 && proctype < NUM_DUMMY_PROCS);
/*
* Set timer so we can wake up after awhile and check for a deadlock.
* If a deadlock is detected, the handler releases the process's
- * semaphore and sets MyProc->waitStatus = STATUS_ERROR, allowing us to
- * know that we must report failure rather than success.
+ * semaphore and sets MyProc->waitStatus = STATUS_ERROR, allowing us
+ * to know that we must report failure rather than success.
*
* By delaying the check until we've waited for a bit, we can avoid
* running the rather expensive deadlock-check code in most cases.
RemoveFromWaitQueue(MyProc);
/*
- * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will report
- * an error after we return from the signal handler.
+ * Set MyProc->waitStatus to STATUS_ERROR so that ProcSleep will
+ * report an error after we return from the signal handler.
*/
MyProc->waitStatus = STATUS_ERROR;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.29 2004/08/29 04:12:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/lmgr/s_lock.c,v 1.30 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* (and thus the probability of unintended failure) than to fix the
* total time spent.
*
- * The pg_usleep() delays are measured in centiseconds (0.01 sec) because 10
- * msec is a common resolution limit at the OS level.
+ * The pg_usleep() delays are measured in centiseconds (0.01 sec) because
+ * 10 msec is a common resolution limit at the OS level.
*/
#define SPINS_PER_DELAY 100
#define NUM_DELAYS 1000
*/
-#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
+#ifdef HAVE_SPINLOCKS /* skip spinlocks if requested */
#if defined(__GNUC__)
}
#endif /* __sparc || __sparc__ */
-
#endif /* not __GNUC__ */
-
-#endif /* HAVE_SPINLOCKS */
+#endif /* HAVE_SPINLOCKS */
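The SPINS_PER_DELAY / NUM_DELAYS comment describes the slow path's shape: spin a bounded number of times, then sleep in roughly 0.01 sec units, and eventually give up. A hedged sketch of that pattern follows; TAS() is assumed to be the port's test-and-set primitive, and demo_spin_lock is an illustration, not the actual s_lock() body:

static void
demo_spin_lock(volatile slock_t *lock)
{
	int			spins = 0;
	int			delays = 0;

	while (TAS(lock))			/* nonzero while someone else holds the lock */
	{
		if (++spins >= SPINS_PER_DELAY)
		{
			if (++delays >= NUM_DELAYS)
				elog(PANIC, "stuck spinlock detected");
			pg_usleep(10000L);	/* one 0.01 sec unit, per the comment above */
			spins = 0;
		}
	}
}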
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.60 2004/08/29 04:12:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/page/bufpage.c,v 1.61 2004/08/29 05:06:48 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
/* Sort in decreasing itemoff order */
return ((itemIdSort) itemidp2)->itemoff -
- ((itemIdSort) itemidp1)->itemoff;
+ ((itemIdSort) itemidp1)->itemoff;
}
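The comparator above gets "decreasing itemoff order" simply by subtracting in the b-minus-a direction. A self-contained illustration of that convention with plain ints (generic qsort idiom, not bufpage.c code):

#include <stdio.h>
#include <stdlib.h>

static int
desc_cmp(const void *a, const void *b)
{
	/* b - a instead of a - b yields descending order */
	return *(const int *) b - *(const int *) a;
}

int
main(void)
{
	int			offs[] = {40, 8192, 1024};

	qsort(offs, 3, sizeof(int), desc_cmp);
	printf("%d %d %d\n", offs[0], offs[1], offs[2]);	/* 8192 1024 40 */
	return 0;
}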
/*
for (i = 0; i < nline; i++)
{
lp = PageGetItemId(page, i + 1);
- if (lp->lp_flags & LP_DELETE) /* marked for deletion */
+ if (lp->lp_flags & LP_DELETE) /* marked for deletion */
lp->lp_flags &= ~(LP_USED | LP_DELETE);
if (lp->lp_flags & LP_USED)
nused++;
for (i = 0; i < nline; i++)
{
lp = PageGetItemId(page, i + 1);
- lp->lp_len = 0; /* indicate unused & deallocated */
+ lp->lp_len = 0; /* indicate unused & deallocated */
}
((PageHeader) page)->pd_upper = pd_special;
}
}
else
{
- lp->lp_len = 0; /* indicate unused & deallocated */
+ lp->lp_len = 0; /* indicate unused & deallocated */
}
}
nline--; /* there's one less than when we started */
for (i = 1; i <= nline; i++)
{
- ItemId ii = PageGetItemId(phdr, i);
+ ItemId ii = PageGetItemId(phdr, i);
+
if (ii->lp_off <= offset)
ii->lp_off += size;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.109 2004/08/29 04:12:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/md.c,v 1.110 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct _MdfdVec
{
- File mdfd_vfd; /* fd number in fd.c's pool */
- BlockNumber mdfd_segno; /* segment number, from 0 */
-#ifndef LET_OS_MANAGE_FILESIZE /* for large relations */
+ File mdfd_vfd; /* fd number in fd.c's pool */
+ BlockNumber mdfd_segno; /* segment number, from 0 */
+#ifndef LET_OS_MANAGE_FILESIZE /* for large relations */
struct _MdfdVec *mdfd_chain; /* next segment, or NULL */
#endif
} MdfdVec;
* we keep track of pending fsync operations: we need to remember all relation
* segments that have been written since the last checkpoint, so that we can
* fsync them down to disk before completing the next checkpoint. This hash
- * table remembers the pending operations. We use a hash table not because
+ * table remembers the pending operations. We use a hash table not because
* we want to look up individual operations, but simply as a convenient way
* of eliminating duplicate requests.
*
*/
typedef struct
{
- RelFileNode rnode; /* the targeted relation */
- BlockNumber segno; /* which segment */
+ RelFileNode rnode; /* the targeted relation */
+ BlockNumber segno; /* which segment */
} PendingOperationEntry;
static HTAB *pendingOpsTable = NULL;
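As the comment above says, pendingOpsTable exists purely to de-duplicate fsync requests. A sketch of how a request could be recorded, assuming the dynahash API (hash_search with HASH_ENTER); demo_remember_fsync_request is illustrative, not the literal md.c routine:

static void
demo_remember_fsync_request(RelFileNode rnode, BlockNumber segno)
{
	PendingOperationEntry key;
	bool		found;

	/* the struct itself is the hash key, so clear padding before filling it */
	MemSet(&key, 0, sizeof(key));
	key.rnode = rnode;
	key.segno = segno;

	/* HASH_ENTER either creates the entry or finds the existing duplicate */
	(void) hash_search(pendingOpsTable, (void *) &key, HASH_ENTER, &found);
}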
static MdfdVec *mdopen(SMgrRelation reln, bool allowNotFound);
static bool register_dirty_segment(SMgrRelation reln, MdfdVec *seg);
static MdfdVec *_fdvec_alloc(void);
+
#ifndef LET_OS_MANAGE_FILESIZE
static MdfdVec *_mdfd_openseg(SMgrRelation reln, BlockNumber segno,
- int oflags);
+ int oflags);
#endif
static MdfdVec *_mdfd_getseg(SMgrRelation reln, BlockNumber blkno,
- bool allowNotFound);
+ bool allowNotFound);
static BlockNumber _mdnblocks(File file, Size blcksz);
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * Create pending-operations hashtable if we need it. Currently,
- * we need it if we are standalone (not under a postmaster) OR
- * if we are a bootstrap-mode subprocess of a postmaster (that is,
- * a startup or bgwriter process).
+ * Create pending-operations hashtable if we need it. Currently, we
+ * need it if we are standalone (not under a postmaster) OR if we are
+ * a bootstrap-mode subprocess of a postmaster (that is, a startup or
+ * bgwriter process).
*/
if (!IsUnderPostmaster || IsBootstrapProcessingMode())
{
pendingOpsTable = hash_create("Pending Ops Table",
100L,
&hash_ctl,
- HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
+ HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
if (pendingOpsTable == NULL)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
static MdfdVec *
mdopen(SMgrRelation reln, bool allowNotFound)
{
- MdfdVec *mdfd;
+ MdfdVec *mdfd;
char *path;
File fd;
FileTruncate(v->mdfd_vfd, 0);
FileUnlink(v->mdfd_vfd);
v = v->mdfd_chain;
- Assert(ov != reln->md_fd); /* we never drop the 1st
- * segment */
+ Assert(ov != reln->md_fd); /* we never drop the 1st segment */
pfree(ov);
}
else if (priorblocks + ((BlockNumber) RELSEG_SIZE) > nblocks)
/*
* If we are in the bgwriter, the sync had better include all fsync
* requests that were queued by backends before the checkpoint REDO
- * point was determined. We go that a little better by accepting
- * all requests queued up to the point where we start fsync'ing.
+ * point was determined. We do a little better than that by accepting
+ * all requests queued up to the point where we start fsync'ing.
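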
*/
AbsorbFsyncRequests();
{
/*
* If fsync is off then we don't have to bother opening the file
- * at all. (We delay checking until this point so that changing
+ * at all. (We delay checking until this point so that changing
* fsync on the fly behaves sensibly.)
*/
if (enableFsync)
{
SMgrRelation reln;
- MdfdVec *seg;
+ MdfdVec *seg;
/*
- * Find or create an smgr hash entry for this relation.
- * This may seem a bit unclean -- md calling smgr? But it's
- * really the best solution. It ensures that the open file
- * reference isn't permanently leaked if we get an error here.
- * (You may say "but an unreferenced SMgrRelation is still a
- * leak!" Not really, because the only case in which a checkpoint
- * is done by a process that isn't about to shut down is in the
+ * Find or create an smgr hash entry for this relation. This
+ * may seem a bit unclean -- md calling smgr? But it's really
+ * the best solution. It ensures that the open file reference
+ * isn't permanently leaked if we get an error here. (You may
+ * say "but an unreferenced SMgrRelation is still a leak!"
+ * Not really, because the only case in which a checkpoint is
+ * done by a process that isn't about to shut down is in the
* bgwriter, and it will periodically do smgrcloseall(). This
* fact justifies our not closing the reln in the success path
* either, which is a good thing since in non-bgwriter cases
reln = smgropen(entry->rnode);
/*
- * It is possible that the relation has been dropped or truncated
- * since the fsync request was entered. Therefore, we have to
- * allow file-not-found errors. This applies both during
- * _mdfd_getseg() and during FileSync, since fd.c might have
- * closed the file behind our back.
+ * It is possible that the relation has been dropped or
+ * truncated since the fsync request was entered. Therefore,
+ * we have to allow file-not-found errors. This applies both
+ * during _mdfd_getseg() and during FileSync, since fd.c might
+ * have closed the file behind our back.
*/
seg = _mdfd_getseg(reln,
entry->segno * ((BlockNumber) RELSEG_SIZE),
/* all done */
return v;
}
-
-#endif /* LET_OS_MANAGE_FILESIZE */
+#endif /* LET_OS_MANAGE_FILESIZE */
/*
* _mdfd_getseg() -- Find the segment of the relation holding the
_mdfd_getseg(SMgrRelation reln, BlockNumber blkno, bool allowNotFound)
{
MdfdVec *v = mdopen(reln, allowNotFound);
+
#ifndef LET_OS_MANAGE_FILESIZE
BlockNumber segstogo;
BlockNumber nextsegno;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.79 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgr.c,v 1.80 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct f_smgr
{
- bool (*smgr_init) (void); /* may be NULL */
+ bool (*smgr_init) (void); /* may be NULL */
bool (*smgr_shutdown) (void); /* may be NULL */
bool (*smgr_close) (SMgrRelation reln);
bool (*smgr_create) (SMgrRelation reln, bool isRedo);
bool (*smgr_unlink) (RelFileNode rnode, bool isRedo);
bool (*smgr_extend) (SMgrRelation reln, BlockNumber blocknum,
- char *buffer, bool isTemp);
+ char *buffer, bool isTemp);
bool (*smgr_read) (SMgrRelation reln, BlockNumber blocknum,
- char *buffer);
+ char *buffer);
bool (*smgr_write) (SMgrRelation reln, BlockNumber blocknum,
- char *buffer, bool isTemp);
+ char *buffer, bool isTemp);
BlockNumber (*smgr_nblocks) (SMgrRelation reln);
BlockNumber (*smgr_truncate) (SMgrRelation reln, BlockNumber nblocks,
- bool isTemp);
+ bool isTemp);
bool (*smgr_immedsync) (SMgrRelation reln);
- bool (*smgr_commit) (void); /* may be NULL */
- bool (*smgr_abort) (void); /* may be NULL */
- bool (*smgr_sync) (void); /* may be NULL */
+ bool (*smgr_commit) (void); /* may be NULL */
+ bool (*smgr_abort) (void); /* may be NULL */
+ bool (*smgr_sync) (void); /* may be NULL */
} f_smgr;
static const f_smgr smgrsw[] = {
/* magnetic disk */
{mdinit, NULL, mdclose, mdcreate, mdunlink, mdextend,
- mdread, mdwrite, mdnblocks, mdtruncate, mdimmedsync,
- NULL, NULL, mdsync
+ mdread, mdwrite, mdnblocks, mdtruncate, mdimmedsync,
+ NULL, NULL, mdsync
}
};
-static const int NSmgr = lengthof(smgrsw);
+static const int NSmgr = lengthof(smgrsw);
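The f_smgr array is the whole dispatch mechanism: each smgr* wrapper indexes smgrsw[] by reln->smgr_which and calls through the stored pointer, turning a false return into an ereport. The wrappers later in this file all follow the same shape; a condensed sketch of the pattern (demo_smgr_close is illustrative only):

static void
demo_smgr_close(SMgrRelation reln)
{
	/* look up the per-manager hook and report failure uniformly */
	if (!(*(smgrsw[reln->smgr_which].smgr_close)) (reln))
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not close relation %u/%u/%u: %m",
						reln->smgr_rnode.spcNode,
						reln->smgr_rnode.dbNode,
						reln->smgr_rnode.relNode)));
}

Keeping the table const and sizing NSmgr with lengthof() means another storage manager would be just another initializer row, with no other bookkeeping.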
/*
typedef struct xl_smgr_create
{
- RelFileNode rnode;
+ RelFileNode rnode;
} xl_smgr_create;
typedef struct xl_smgr_truncate
{
- BlockNumber blkno;
- RelFileNode rnode;
+ BlockNumber blkno;
+ RelFileNode rnode;
} xl_smgr_truncate;
/* local function prototypes */
static void smgrshutdown(int code, Datum arg);
static void smgr_internal_unlink(RelFileNode rnode, int which,
- bool isTemp, bool isRedo);
+ bool isTemp, bool isRedo);
/*
{
if (smgrsw[i].smgr_init)
{
- if (! (*(smgrsw[i].smgr_init)) ())
+ if (!(*(smgrsw[i].smgr_init)) ())
elog(FATAL, "smgr initialization failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
{
if (smgrsw[i].smgr_shutdown)
{
- if (! (*(smgrsw[i].smgr_shutdown)) ())
+ if (!(*(smgrsw[i].smgr_shutdown)) ())
elog(FATAL, "smgr shutdown failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
SMgrRelation
smgropen(RelFileNode rnode)
{
- SMgrRelation reln;
+ SMgrRelation reln;
bool found;
if (SMgrRelationHash == NULL)
void
smgrclose(SMgrRelation reln)
{
- if (! (*(smgrsw[reln->smgr_which].smgr_close)) (reln))
+ if (!(*(smgrsw[reln->smgr_which].smgr_close)) (reln))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not close relation %u/%u/%u: %m",
hash_seq_init(&status, SMgrRelationHash);
while ((reln = (SMgrRelation) hash_seq_search(&status)) != NULL)
- {
smgrclose(reln);
- }
}
/*
void
smgrclosenode(RelFileNode rnode)
{
- SMgrRelation reln;
+ SMgrRelation reln;
/* Nothing to do if hashtable not set up */
if (SMgrRelationHash == NULL)
void
smgrcreate(SMgrRelation reln, bool isTemp, bool isRedo)
{
- XLogRecPtr lsn;
- XLogRecData rdata;
- xl_smgr_create xlrec;
+ XLogRecPtr lsn;
+ XLogRecData rdata;
+ xl_smgr_create xlrec;
PendingRelDelete *pending;
/*
* database, so create a per-database subdirectory if needed.
*
* XXX this is a fairly ugly violation of module layering, but this seems
- * to be the best place to put the check. Maybe TablespaceCreateDbspace
- * should be here and not in commands/tablespace.c? But that would imply
- * importing a lot of stuff that smgr.c oughtn't know, either.
+ * to be the best place to put the check. Maybe
+ * TablespaceCreateDbspace should be here and not in
+ * commands/tablespace.c? But that would imply importing a lot of
+ * stuff that smgr.c oughtn't know, either.
*/
TablespaceCreateDbspace(reln->smgr_rnode.spcNode,
reln->smgr_rnode.dbNode,
isRedo);
- if (! (*(smgrsw[reln->smgr_which].smgr_create)) (reln, isRedo))
+ if (!(*(smgrsw[reln->smgr_which].smgr_create)) (reln, isRedo))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not create relation %u/%u/%u: %m",
return;
/*
- * Make a non-transactional XLOG entry showing the file creation. It's
- * non-transactional because we should replay it whether the transaction
- * commits or not; if not, the file will be dropped at abort time.
+ * Make a non-transactional XLOG entry showing the file creation.
+ * It's non-transactional because we should replay it whether the
+ * transaction commits or not; if not, the file will be dropped at
+ * abort time.
*/
xlrec.rnode = reln->smgr_rnode;
void
smgrdounlink(SMgrRelation reln, bool isTemp, bool isRedo)
{
- RelFileNode rnode = reln->smgr_rnode;
+ RelFileNode rnode = reln->smgr_rnode;
int which = reln->smgr_which;
/* Close the file and throw away the hashtable entry */
smgr_internal_unlink(RelFileNode rnode, int which, bool isTemp, bool isRedo)
{
/*
- * Get rid of any leftover buffers for the rel (shouldn't be any in the
- * commit case, but there can be in the abort case).
+ * Get rid of any leftover buffers for the rel (shouldn't be any in
+ * the commit case, but there can be in the abort case).
*/
DropRelFileNodeBuffers(rnode, isTemp, 0);
/*
- * Tell the free space map to forget this relation. It won't be accessed
- * any more anyway, but we may as well recycle the map space quickly.
+ * Tell the free space map to forget this relation. It won't be
+ * accessed any more anyway, but we may as well recycle the map space
+ * quickly.
*/
FreeSpaceMapForgetRel(&rnode);
/*
* And delete the physical files.
*
- * Note: we treat deletion failure as a WARNING, not an error,
- * because we've already decided to commit or abort the current xact.
+ * Note: we treat deletion failure as a WARNING, not an error, because
+ * we've already decided to commit or abort the current xact.
*/
- if (! (*(smgrsw[which].smgr_unlink)) (rnode, isRedo))
+ if (!(*(smgrsw[which].smgr_unlink)) (rnode, isRedo))
ereport(WARNING,
(errcode_for_file_access(),
errmsg("could not unlink relation %u/%u/%u: %m",
void
smgrextend(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
{
- if (! (*(smgrsw[reln->smgr_which].smgr_extend)) (reln, blocknum, buffer,
- isTemp))
+ if (!(*(smgrsw[reln->smgr_which].smgr_extend)) (reln, blocknum, buffer,
+ isTemp))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not extend relation %u/%u/%u: %m",
void
smgrread(SMgrRelation reln, BlockNumber blocknum, char *buffer)
{
- if (! (*(smgrsw[reln->smgr_which].smgr_read)) (reln, blocknum, buffer))
+ if (!(*(smgrsw[reln->smgr_which].smgr_read)) (reln, blocknum, buffer))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not read block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not read block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
void
smgrwrite(SMgrRelation reln, BlockNumber blocknum, char *buffer, bool isTemp)
{
- if (! (*(smgrsw[reln->smgr_which].smgr_write)) (reln, blocknum, buffer,
- isTemp))
+ if (!(*(smgrsw[reln->smgr_which].smgr_write)) (reln, blocknum, buffer,
+ isTemp))
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not write block %u of relation %u/%u/%u: %m",
- blocknum,
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode)));
+ errmsg("could not write block %u of relation %u/%u/%u: %m",
+ blocknum,
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode)));
}
/*
BlockNumber newblks;
/*
- * Tell the free space map to forget anything it may have stored
- * for the about-to-be-deleted blocks. We want to be sure it
- * won't return bogus block numbers later on.
+ * Tell the free space map to forget anything it may have stored for
+ * the about-to-be-deleted blocks. We want to be sure it won't return
+ * bogus block numbers later on.
*/
FreeSpaceMapTruncateRel(&reln->smgr_rnode, nblocks);
if (newblks == InvalidBlockNumber)
ereport(ERROR,
(errcode_for_file_access(),
- errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
- reln->smgr_rnode.spcNode,
- reln->smgr_rnode.dbNode,
- reln->smgr_rnode.relNode,
- nblocks)));
+ errmsg("could not truncate relation %u/%u/%u to %u blocks: %m",
+ reln->smgr_rnode.spcNode,
+ reln->smgr_rnode.dbNode,
+ reln->smgr_rnode.relNode,
+ nblocks)));
if (!isTemp)
{
/*
- * Make a non-transactional XLOG entry showing the file truncation.
- * It's non-transactional because we should replay it whether the
- * transaction commits or not; the underlying file change is certainly
- * not reversible.
+ * Make a non-transactional XLOG entry showing the file
+ * truncation. It's non-transactional because we should replay it
+ * whether the transaction commits or not; the underlying file
+ * change is certainly not reversible.
*/
- XLogRecPtr lsn;
- XLogRecData rdata;
+ XLogRecPtr lsn;
+ XLogRecData rdata;
xl_smgr_truncate xlrec;
xlrec.blkno = newblks;
void
smgrimmedsync(SMgrRelation reln)
{
- if (! (*(smgrsw[reln->smgr_which].smgr_immedsync)) (reln))
+ if (!(*(smgrsw[reln->smgr_which].smgr_immedsync)) (reln))
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not sync relation %u/%u/%u: %m",
{
if (smgrsw[i].smgr_commit)
{
- if (! (*(smgrsw[i].smgr_commit)) ())
+ if (!(*(smgrsw[i].smgr_commit)) ())
elog(ERROR, "transaction commit failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
{
if (smgrsw[i].smgr_abort)
{
- if (! (*(smgrsw[i].smgr_abort)) ())
+ if (!(*(smgrsw[i].smgr_abort)) ())
elog(ERROR, "transaction abort failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
{
if (smgrsw[i].smgr_sync)
{
- if (! (*(smgrsw[i].smgr_sync)) ())
+ if (!(*(smgrsw[i].smgr_sync)) ())
elog(ERROR, "storage sync failed on %s: %m",
DatumGetCString(DirectFunctionCall1(smgrout,
Int16GetDatum(i))));
/*
* First, force bufmgr to drop any buffers it has for the to-be-
- * truncated blocks. We must do this, else subsequent XLogReadBuffer
- * operations will not re-extend the file properly.
+ * truncated blocks. We must do this, else subsequent
+ * XLogReadBuffer operations will not re-extend the file properly.
*/
DropRelFileNodeBuffers(xlrec->rnode, false, xlrec->blkno);
/* Do the truncation */
newblks = (*(smgrsw[reln->smgr_which].smgr_truncate)) (reln,
- xlrec->blkno,
+ xlrec->blkno,
false);
if (newblks == InvalidBlockNumber)
ereport(WARNING,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/storage/smgr/smgrtype.c,v 1.24 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/storage/smgr/smgrtype.c,v 1.25 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct smgrid
{
- const char *smgr_name;
+ const char *smgr_name;
} smgrid;
/*
{"magnetic disk"}
};
-static const int NStorageManagers = lengthof(StorageManager);
+static const int NStorageManagers = lengthof(StorageManager);
Datum
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.74 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/fastpath.c,v 1.75 2004/08/29 05:06:49 momjian Exp $
*
* NOTES
* This cruft is the server side of PQfn.
getTypeOutputInfo(rettype, &typoutput, &typioparam, &typisvarlena);
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
retval,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(-1)));
pq_sendcountedtext(&buf, outputstr, strlen(outputstr), false);
pfree(outputstr);
&typsend, &typioparam, &typisvarlena);
outputbytes = DatumGetByteaP(OidFunctionCall2(typsend,
retval,
- ObjectIdGetDatum(typioparam)));
+ ObjectIdGetDatum(typioparam)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
/*
* Begin parsing the buffer contents.
fcinfo->arg[i] = OidFunctionCall2(typreceive,
PointerGetDatum(&abuf),
- ObjectIdGetDatum(typioparam));
+ ObjectIdGetDatum(typioparam));
/* Trouble if it didn't eat the whole buffer */
if (abuf.cursor != abuf.len)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.429 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/postgres.c,v 1.430 2004/08/29 05:06:49 momjian Exp $
*
* NOTES
* this is the "main" module of the postgres backend and
CommandDest whereToSendOutput = Debug;
/* flag for logging end of session */
-bool Log_disconnections = false;
+bool Log_disconnections = false;
LogStmtLevel log_statement = LOGSTMT_NONE;
*/
/* max_stack_depth converted to bytes for speed of checking */
-static int max_stack_depth_bytes = 2048*1024;
+static int max_stack_depth_bytes = 2048 * 1024;
/* stack base pointer (initialized by PostgresMain) */
static char *stack_base_ptr = NULL;
Node *parsetree = (Node *) lfirst(list_item);
querytree_list = list_concat(querytree_list,
- pg_analyze_and_rewrite(parsetree,
- paramTypes,
- numParams));
+ pg_analyze_and_rewrite(parsetree,
+ paramTypes,
+ numParams));
}
return querytree_list;
{
Node *parsetree = (Node *) lfirst(parsetree_item);
const char *commandTag;
-
+
if (IsA(parsetree, ExplainStmt) &&
- ((ExplainStmt *)parsetree)->analyze)
- parsetree = (Node *)(((ExplainStmt *)parsetree)->query);
-
+ ((ExplainStmt *) parsetree)->analyze)
+ parsetree = (Node *) (((ExplainStmt *) parsetree)->query);
+
if (IsA(parsetree, PrepareStmt))
- parsetree = (Node *)(((PrepareStmt *)parsetree)->query);
-
+ parsetree = (Node *) (((PrepareStmt *) parsetree)->query);
+
if (IsA(parsetree, SelectStmt))
- continue; /* optimization for frequent command */
-
+ continue; /* optimization for frequent command */
+
if (log_statement == LOGSTMT_MOD &&
(IsA(parsetree, InsertStmt) ||
IsA(parsetree, UpdateStmt) ||
IsA(parsetree, DeleteStmt) ||
IsA(parsetree, TruncateStmt) ||
(IsA(parsetree, CopyStmt) &&
- ((CopyStmt *)parsetree)->is_from))) /* COPY FROM */
+ ((CopyStmt *) parsetree)->is_from))) /* COPY FROM */
{
ereport(LOG,
(errmsg("statement: %s", query_string)));
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Make sure we are in a transaction command */
if (save_log_duration)
ereport(LOG,
(errmsg("duration: %ld.%03ld ms",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000)));
/*
- * Output a duration_statement to the log if the query has exceeded
- * the min duration, or if we are to print all durations.
+ * Output a duration_statement to the log if the query has
+ * exceeded the min duration, or if we are to print all durations.
*/
if (save_log_min_duration_statement == 0 ||
(save_log_min_duration_statement > 0 &&
usecs >= save_log_min_duration_statement * 1000))
ereport(LOG,
(errmsg("duration: %ld.%03ld ms statement: %s",
- (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
- (stop_t.tv_usec - start_t.tv_usec) / 1000),
- (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
+ (long) ((stop_t.tv_sec - start_t.tv_sec) * 1000 +
+ (stop_t.tv_usec - start_t.tv_usec) / 1000),
+ (long) (stop_t.tv_usec - start_t.tv_usec) % 1000,
query_string)));
}
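To make the duration arithmetic above concrete: with start_t = {tv_sec = 100, tv_usec = 200000} and stop_t = {tv_sec = 101, tv_usec = 450500}, the whole-millisecond part is (101 - 100) * 1000 + (450500 - 200000) / 1000 = 1250 and the remainder is (450500 - 200000) % 1000 = 500, so the log line reads "duration: 1250.500 ms". Both logging paths above use the same expressions.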
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/*
if (ptype == InvalidOid || ptype == UNKNOWNOID)
ereport(ERROR,
(errcode(ERRCODE_INDETERMINATE_DATATYPE),
- errmsg("could not determine data type of parameter $%d",
- i + 1)));
+ errmsg("could not determine data type of parameter $%d",
+ i + 1)));
param_list = lappend_oid(param_list, ptype);
}
ereport(ERROR,
(errcode(ERRCODE_PROTOCOL_VIOLATION),
errmsg("bind message supplies %d parameters, but prepared statement \"%s\" requires %d",
- numParams, stmt_name, list_length(pstmt->argtype_list))));
+ numParams, stmt_name, list_length(pstmt->argtype_list))));
/*
* Create the portal. Allow silent replacement of an existing portal
params[i].value =
OidFunctionCall2(typreceive,
PointerGetDatum(&pbuf),
- ObjectIdGetDatum(typioparam));
+ ObjectIdGetDatum(typioparam));
/* Trouble if it didn't eat the whole buffer */
if (pbuf.cursor != pbuf.len)
* If we didn't plan the query before, do it now. This allows the
* planner to make use of the concrete parameter values we now have.
*
- * This happens only for unnamed statements, and so switching into
- * the statement context for planning is correct (see notes in
+ * This happens only for unnamed statements, and so switching into the
+ * statement context for planning is correct (see notes in
* exec_parse_message).
*/
if (pstmt->plan_list == NIL && pstmt->query_list != NIL &&
ereport(ERROR,
(errcode(ERRCODE_IN_FAILED_SQL_TRANSACTION),
errmsg("current transaction is aborted, "
- "commands ignored until end of transaction block")));
+ "commands ignored until end of transaction block")));
}
/* Check for cancel signal before we start execution */
*/
ereport(WARNING,
(errcode(ERRCODE_CRASH_SHUTDOWN),
- errmsg("terminating connection because of crash of another server process"),
- errdetail("The postmaster has commanded this server process to roll back"
- " the current transaction and exit, because another"
- " server process exited abnormally and possibly corrupted"
- " shared memory."),
+ errmsg("terminating connection because of crash of another server process"),
+ errdetail("The postmaster has commanded this server process to roll back"
+ " the current transaction and exit, because another"
+ " server process exited abnormally and possibly corrupted"
+ " shared memory."),
errhint("In a moment you should be able to reconnect to the"
" database and repeat your command.")));
void
check_stack_depth(void)
{
- char stack_top_loc;
- int stack_depth;
+ char stack_top_loc;
+ int stack_depth;
/*
* Compute distance from PostgresMain's local variables to my own
*
* Note: in theory stack_depth should be ptrdiff_t or some such, but
- * since the whole point of this code is to bound the value to something
- * much less than integer-sized, int should work fine.
+ * since the whole point of this code is to bound the value to
+ * something much less than integer-sized, int should work fine.
*/
stack_depth = (int) (stack_base_ptr - &stack_top_loc);
+
/*
- * Take abs value, since stacks grow up on some machines, down on others
+ * Take abs value, since stacks grow up on some machines, down on
+ * others
*/
if (stack_depth < 0)
stack_depth = -stack_depth;
+
/*
* Trouble?
*
* The test on stack_base_ptr prevents us from erroring out if called
- * during process setup or in a non-backend process. Logically it should
- * be done first, but putting it here avoids wasting cycles during normal
- * cases.
+ * during process setup or in a non-backend process. Logically it
+ * should be done first, but putting it here avoids wasting cycles
+ * during normal cases.
*/
if (stack_depth > max_stack_depth_bytes &&
stack_base_ptr != NULL)
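check_stack_depth() relies on nothing more exotic than comparing the address of a local variable against an address recorded near the bottom of the stack. A standalone sketch of the trick (the demo_* names are made up; the backend records stack_base_ptr in PostgresMain):

static char *demo_stack_base = NULL;	/* set once from a long-lived frame */

static int
demo_stack_depth(void)
{
	char		top_loc;
	long		depth = demo_stack_base - &top_loc;

	/* stacks grow down on most machines, up on a few: take the magnitude */
	return (int) (depth < 0 ? -depth : depth);
}

void
demo_main(void)
{
	char		base_loc;		/* its address marks the stack base */

	demo_stack_base = &base_loc;
	/* anything called from here on can consult demo_stack_depth() */
}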
char *tmp;
int firstchar;
char stack_base;
- StringInfoData input_message;
+ StringInfoData input_message;
sigjmp_buf local_sigjmp_buf;
volatile bool send_rfq = true;
-
+
/*
* Catch standard options before doing much else. This even works on
* systems without getopt_long.
elog(FATAL, "%s: could not locate my own executable path",
argv[0]);
}
-
+
if (pkglib_path[0] == '\0')
get_pkglib_path(my_exec_path, pkglib_path);
/*
* ignore system indexes
*
- * As of PG 7.4 this is safe to allow from the client,
- * since it only disables reading the system indexes,
- * not writing them. Worst case consequence is slowness.
+ * As of PG 7.4 this is safe to allow from the client, since
+ * it only disables reading the system indexes, not
+ * writing them. Worst case consequence is slowness.
*/
IgnoreSystemIndexes(true);
break;
break;
case 'p':
+
/*
* p - special flag passed if backend was forked by a
* postmaster.
/*
* wait N seconds to allow attach from a debugger
*/
- pg_usleep(atoi(optarg)*1000000L);
+ pg_usleep(atoi(optarg) * 1000000L);
break;
case 'c':
while (gucopts)
{
- char *name;
- char *value;
+ char *name;
+ char *value;
name = lfirst(gucopts);
gucopts = lnext(gucopts);
{
ereport(FATAL,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("invalid command-line arguments for server process"),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errmsg("invalid command-line arguments for server process"),
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
XLOGPathInit();
(errcode(ERRCODE_SYNTAX_ERROR),
errmsg("%s: invalid command-line arguments",
argv[0]),
- errhint("Try \"%s --help\" for more information.", argv[0])));
+ errhint("Try \"%s --help\" for more information.", argv[0])));
}
else if (argc - optind == 1)
dbname = argv[optind];
* If an exception is encountered, processing resumes here so we abort
* the current transaction and start a new one.
*
- * You might wonder why this isn't coded as an infinite loop around
- * a PG_TRY construct. The reason is that this is the bottom of the
+ * You might wonder why this isn't coded as an infinite loop around a
+ * PG_TRY construct. The reason is that this is the bottom of the
* exception stack, and so with PG_TRY there would be no exception
* handler in force at all during the CATCH part. By leaving the
* outermost setjmp always active, we have at least some chance of
- * recovering from an error during error recovery. (If we get into
- * an infinite loop thereby, it will soon be stopped by overflow of
+ * recovering from an error during error recovery. (If we get into an
+ * infinite loop thereby, it will soon be stopped by overflow of
* elog.c's internal state stack.)
*/
/*
* NOTE: if you are tempted to add more code in this if-block,
* consider the high probability that it should be in
- * AbortTransaction() instead. The only stuff done directly here
- * should be stuff that is guaranteed to apply *only* for outer-level
- * error recovery, such as adjusting the FE/BE protocol status.
+ * AbortTransaction() instead. The only stuff done directly here
+ * should be stuff that is guaranteed to apply *only* for
+ * outer-level error recovery, such as adjusting the FE/BE
+ * protocol status.
*/
/* Since not using PG_TRY, must reset error stack by hand */
/*
* Forget any pending QueryCancel request, since we're returning
- * to the idle loop anyway, and cancel the statement timer if running.
+ * to the idle loop anyway, and cancel the statement timer if
+ * running.
*/
QueryCancelPending = false;
disable_sig_alarm(true);
QueryCancelPending = false; /* again in case timeout occurred */
/*
- * Turn off these interrupts too. This is only needed here and not
- * in other exception-catching places since these interrupts are
- * only enabled while we wait for client input.
+ * Turn off these interrupts too. This is only needed here and
+ * not in other exception-catching places since these interrupts
+ * are only enabled while we wait for client input.
*/
DisableNotifyInterrupt();
DisableCatchupInterrupt();
EmitErrorReport();
/*
- * Make sure debug_query_string gets reset before we possibly clobber
- * the storage it points at.
+ * Make sure debug_query_string gets reset before we possibly
+ * clobber the storage it points at.
*/
debug_query_string = NULL;
*
* This is also a good time to send collected statistics to the
* collector, and to update the PS stats display. We avoid doing
- * those every time through the message loop because it'd slow down
- * processing of batched messages.
+ * those every time through the message loop because it'd slow
+ * down processing of batched messages.
*/
if (send_rfq)
{
/*
* on_proc_exit handler to log end of session
*/
-static void
+static void
log_disconnections(int code, Datum arg)
{
- Port *port = MyProcPort;
+ Port *port = MyProcPort;
struct timeval end;
- int hours, minutes, seconds;
-
- char session_time[20];
- char uname[6+NAMEDATALEN];
- char dbname[10+NAMEDATALEN];
- char remote_host[7 + NI_MAXHOST];
- char remote_port[7 + NI_MAXSERV];
-
- snprintf(uname, sizeof(uname)," user=%s",port->user_name);
- snprintf(dbname, sizeof(dbname)," database=%s",port->database_name);
- snprintf(remote_host,sizeof(remote_host)," host=%s",
+ int hours,
+ minutes,
+ seconds;
+
+ char session_time[20];
+ char uname[6 + NAMEDATALEN];
+ char dbname[10 + NAMEDATALEN];
+ char remote_host[7 + NI_MAXHOST];
+ char remote_port[7 + NI_MAXSERV];
+
+ snprintf(uname, sizeof(uname), " user=%s", port->user_name);
+ snprintf(dbname, sizeof(dbname), " database=%s", port->database_name);
+ snprintf(remote_host, sizeof(remote_host), " host=%s",
port->remote_host);
- snprintf(remote_port,sizeof(remote_port)," port=%s",port->remote_port);
+ snprintf(remote_port, sizeof(remote_port), " port=%s", port->remote_port);
- gettimeofday(&end,NULL);
+ gettimeofday(&end, NULL);
if (end.tv_usec < port->session_start.tv_usec)
{
/* if time has gone backwards for some reason say so, or print time */
if (end.tv_sec < 0)
- snprintf(session_time,sizeof(session_time),"negative!");
+ snprintf(session_time, sizeof(session_time), "negative!");
else
- /* for stricter accuracy here we could round - this is close enough */
+
+ /*
+ * for stricter accuracy here we could round - this is close
+ * enough
+ */
snprintf(session_time, sizeof(session_time),
- "%d:%02d:%02d.%02d",
- hours, minutes, seconds, (int) (end.tv_usec/10000));
-
+ "%d:%02d:%02d.%02d",
+ hours, minutes, seconds, (int) (end.tv_usec / 10000));
+
ereport(
- LOG,
- (errmsg("disconnection: session time: %s%s%s%s%s",
- session_time,uname,dbname,remote_host,remote_port)));
+ LOG,
+ (errmsg("disconnection: session time: %s%s%s%s%s",
+ session_time, uname, dbname, remote_host, remote_port)));
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.84 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/pquery.c,v 1.85 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* ActivePortal is the currently executing Portal (the most closely nested,
* if there are several).
*/
-Portal ActivePortal = NULL;
+Portal ActivePortal = NULL;
static uint32 RunFromStore(Portal portal, ScanDirection direction, long count,
AssertState(portal->status == PORTAL_NEW); /* else extra PortalStart */
/*
- * Set up global portal context pointers. (Should we set QueryContext?)
+ * Set up global portal context pointers. (Should we set
+ * QueryContext?)
*/
saveActivePortal = ActivePortal;
saveResourceOwner = CurrentResourceOwner;
SetQuerySnapshot();
/*
- * Create QueryDesc in portal's context; for the moment, set
- * the destination to None.
+ * Create QueryDesc in portal's context; for the moment,
+ * set the destination to None.
*/
queryDesc = CreateQueryDesc((Query *) linitial(portal->parseTrees),
- (Plan *) linitial(portal->planTrees),
+ (Plan *) linitial(portal->planTrees),
None_Receiver,
params,
false);
* Reset cursor position data to "start of query"
*/
portal->atStart = true;
- portal->atEnd = false; /* allow fetches */
+ portal->atEnd = false; /* allow fetches */
portal->portalPos = 0;
portal->posOverflow = false;
break;
case PORTAL_UTIL_SELECT:
/*
- * We don't set query snapshot here, because PortalRunUtility
- * will take care of it.
+ * We don't set query snapshot here, because
+ * PortalRunUtility will take care of it.
*/
portal->tupDesc =
UtilityTupleDescriptor(((Query *) linitial(portal->parseTrees))->utilityStmt);
* Reset cursor position data to "start of query"
*/
portal->atStart = true;
- portal->atEnd = false; /* allow fetches */
+ portal->atEnd = false; /* allow fetches */
portal->portalPos = 0;
portal->posOverflow = false;
break;
if (log_executor_stats && portal->strategy != PORTAL_MULTI_QUERY)
{
ereport(DEBUG3,
- (errmsg_internal("PortalRun")));
+ (errmsg_internal("PortalRun")));
/* PORTAL_MULTI_QUERY logs its own stats per query */
ResetUsage();
}
-
+
/*
* Check for improper portal use, and mark portal active.
*/
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now true.
+ * Since it's a forward fetch, say DONE iff atEnd is now
+ * true.
*/
result = portal->atEnd;
break;
portal->status = PORTAL_READY;
/*
- * Since it's a forward fetch, say DONE iff atEnd is now true.
+ * Since it's a forward fetch, say DONE iff atEnd is now
+ * true.
*/
result = portal->atEnd;
break;
default:
elog(ERROR, "unrecognized portal strategy: %d",
(int) portal->strategy);
- result = false; /* keep compiler quiet */
+ result = false; /* keep compiler quiet */
break;
}
}
default:
elog(ERROR, "unsupported portal strategy");
- result = 0; /* keep compiler quiet */
+ result = 0; /* keep compiler quiet */
break;
}
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.227 2004/08/29 04:12:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/tcop/utility.c,v 1.228 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
errmsg(rentry->nonexistent_msg, rel->relname)));
}
- Assert(false); /* Should be impossible */
+ Assert(false); /* Should be impossible */
}
static void
switch (stmt->kind)
{
- /*
- * START TRANSACTION, as defined by SQL99:
- * Identical to BEGIN. Same code for both.
- */
+ /*
+ * START TRANSACTION, as defined by SQL99:
+ * Identical to BEGIN. Same code for both.
+ */
case TRANS_STMT_BEGIN:
case TRANS_STMT_START:
{
case TRANS_STMT_SAVEPOINT:
{
- ListCell *cell;
- char *name = NULL;
+ ListCell *cell;
+ char *name = NULL;
- RequireTransactionChain((void *)stmt, "SAVEPOINT");
+ RequireTransactionChain((void *) stmt, "SAVEPOINT");
- foreach (cell, stmt->options)
+ foreach(cell, stmt->options)
{
- DefElem *elem = lfirst(cell);
+ DefElem *elem = lfirst(cell);
+
if (strcmp(elem->defname, "savepoint_name") == 0)
name = strVal(elem->arg);
}
break;
case TRANS_STMT_RELEASE:
- RequireTransactionChain((void *)stmt, "RELEASE SAVEPOINT");
+ RequireTransactionChain((void *) stmt, "RELEASE SAVEPOINT");
ReleaseSavepoint(stmt->options);
break;
case TRANS_STMT_ROLLBACK_TO:
- RequireTransactionChain((void *)stmt, "ROLLBACK TO SAVEPOINT");
+ RequireTransactionChain((void *) stmt, "ROLLBACK TO SAVEPOINT");
RollbackToSavepoint(stmt->options);
+
/*
- * CommitTransactionCommand is in charge
- * of re-defining the savepoint again
+ * CommitTransactionCommand is in charge of
+ * re-defining the savepoint again
*/
break;
}
stmt->unique,
stmt->primary,
stmt->isconstraint,
- false, /* is_alter_table */
- true, /* check_rights */
- false, /* skip_build */
- false); /* quiet */
+ false, /* is_alter_table */
+ true, /* check_rights */
+ false, /* skip_build */
+ false); /* quiet */
}
break;
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else if (strcmp(n->name, "SESSION CHARACTERISTICS") == 0)
if (strcmp(item->defname, "transaction_isolation") == 0)
SetPGVariable("default_transaction_isolation",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
else if (strcmp(item->defname, "transaction_read_only") == 0)
SetPGVariable("default_transaction_read_only",
- list_make1(item->arg), n->is_local);
+ list_make1(item->arg), n->is_local);
}
}
else
{
AlterTableStmt *stmt = (AlterTableStmt *) parsetree;
- /*
- * We might be supporting ALTER INDEX here, so
- * set the completion table appropriately.
- * Catch all other possibilities with ALTER TABLE
+ /*
+ * We might be supporting ALTER INDEX here, so set the
+ * completion table appropriately. Catch all other
+ * possibilities with ALTER TABLE
*/
- if(stmt->relkind == OBJECT_INDEX)
+ if (stmt->relkind == OBJECT_INDEX)
tag = "ALTER INDEX";
else
tag = "ALTER TABLE";
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.110 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/acl.c,v 1.111 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static const char *aclparse(const char *s, AclItem *aip);
static bool aclitem_match(const AclItem *a1, const AclItem *a2);
static void check_circularity(const Acl *old_acl, const AclItem *mod_aip,
- AclId ownerid);
+ AclId ownerid);
static Acl *recursive_revoke(Acl *acl, AclId grantee, AclMode revoke_privs,
- AclId ownerid, DropBehavior behavior);
+ AclId ownerid, DropBehavior behavior);
static bool in_group(AclId uid, AclId gid);
static AclMode convert_priv_string(text *priv_type_text);
static AclMode convert_language_priv_string(text *priv_type_text);
static Oid convert_schema_name(text *schemaname);
static AclMode convert_schema_priv_string(text *priv_type_text);
-static Oid convert_tablespace_name(text *tablespacename);
+static Oid convert_tablespace_name(text *tablespacename);
static AclMode convert_tablespace_priv_string(text *priv_type_text);
ereport(ERROR,
(errcode(ERRCODE_NAME_TOO_LONG),
errmsg("identifier too long"),
- errdetail("Identifier must be less than %d characters.",
- NAMEDATALEN)));
+ errdetail("Identifier must be less than %d characters.",
+ NAMEDATALEN)));
n[len++] = *s;
}
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("unrecognized key word: \"%s\"", name),
- errhint("ACL key word must be \"group\" or \"user\".")));
+ errhint("ACL key word must be \"group\" or \"user\".")));
s = getid(s, name); /* move s to the name beyond the keyword */
if (name[0] == '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
errmsg("missing name"),
- errhint("A name must follow the \"group\" or \"user\" key word.")));
+ errhint("A name must follow the \"group\" or \"user\" key word.")));
}
if (name[0] == '\0')
idtype = ACL_IDTYPE_WORLD;
aip->ai_grantor = BOOTSTRAP_USESYSID;
ereport(WARNING,
(errcode(ERRCODE_INVALID_GRANTOR),
- errmsg("defaulting grantor to user ID %u", BOOTSTRAP_USESYSID)));
+ errmsg("defaulting grantor to user ID %u", BOOTSTRAP_USESYSID)));
}
ACLITEM_SET_PRIVS_IDTYPE(*aip, privs, goption, idtype);
/*
* Note that the owner's entry shows all ordinary privileges but no
* grant options. This is because his grant options come "from the
- * system" and not from his own efforts. (The SQL spec says that
- * the owner's rights come from a "_SYSTEM" authid.) However, we do
+ * system" and not from his own efforts. (The SQL spec says that the
+ * owner's rights come from a "_SYSTEM" authid.) However, we do
* consider that the owner's ordinary privileges are self-granted;
* this lets him revoke them. We implement the owner's grant options
* without any explicit "_SYSTEM"-like ACL entry, by internally
break;
case ACL_MODECHG_DEL:
ACLITEM_SET_RIGHTS(new_aip[dst],
- old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
+ old_rights & ~ACLITEM_GET_RIGHTS(*mod_aip));
break;
case ACL_MODECHG_EQL:
ACLITEM_SET_RIGHTS(new_aip[dst],
}
/*
- * Remove abandoned privileges (cascading revoke). Currently we
- * can only handle this when the grantee is a user.
+ * Remove abandoned privileges (cascading revoke). Currently we can
+ * only handle this when the grantee is a user.
*/
if ((old_goptions & ~new_goptions) != 0)
{
aclnewowner(const Acl *old_acl, AclId oldownerid, AclId newownerid)
{
Acl *new_acl;
- AclItem *new_aip;
- AclItem *old_aip;
- AclItem *dst_aip;
- AclItem *src_aip;
- AclItem *targ_aip;
+ AclItem *new_aip;
+ AclItem *old_aip;
+ AclItem *dst_aip;
+ AclItem *src_aip;
+ AclItem *targ_aip;
bool newpresent = false;
int dst,
src,
/*
* Make a copy of the given ACL, substituting new owner ID for old
- * wherever it appears as either grantor or grantee. Also note if
- * the new owner ID is already present.
+ * wherever it appears as either grantor or grantee. Also note if the
+ * new owner ID is already present.
*/
num = ACL_NUM(old_acl);
old_aip = ACL_DAT(old_acl);
/*
* If the old ACL contained any references to the new owner, then we
- * may now have generated an ACL containing duplicate entries. Find
+ * may now have generated an ACL containing duplicate entries. Find
* them and merge them so that there are not duplicates. (This is
* relatively expensive since we use a stupid O(N^2) algorithm, but
* it's unlikely to be the normal case.)
* To simplify deletion of duplicate entries, we temporarily leave them
* in the array but set their privilege masks to zero; when we reach
* such an entry it's just skipped. (Thus, a side effect of this code
- * will be to remove privilege-free entries, should there be any in the
- * input.) dst is the next output slot, targ is the currently considered
- * input slot (always >= dst), and src scans entries to the right of targ
- * looking for duplicates. Once an entry has been emitted to dst it is
- * known duplicate-free and need not be considered anymore.
+ * will be to remove privilege-free entries, should there be any in
+ * the input.) dst is the next output slot, targ is the currently
+ * considered input slot (always >= dst), and src scans entries to the
+ * right of targ looking for duplicates. Once an entry has been
+ * emitted to dst it is known duplicate-free and need not be
+ * considered anymore.
*/
if (newpresent)
{
/*
* For now, grant options can only be granted to users, not groups or
- * PUBLIC. Otherwise we'd have to work a bit harder here.
+ * PUBLIC. Otherwise we'd have to work a bit harder here.
*/
Assert(ACLITEM_GET_IDTYPE(*mod_aip) == ACL_IDTYPE_UID);
own_privs = aclmask(acl,
mod_aip->ai_grantor,
ownerid,
- ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
+ ACL_GRANT_OPTION_FOR(ACLITEM_GET_GOPTIONS(*mod_aip)),
ACLMASK_ALL);
own_privs = ACL_OPTION_TO_PRIVS(own_privs);
*/
for (i = 0; i < num; i++)
{
- AclItem *aidata = &aidat[i];
+ AclItem *aidata = &aidat[i];
if (ACLITEM_GET_IDTYPE(*aidata) == ACL_IDTYPE_WORLD
|| (ACLITEM_GET_IDTYPE(*aidata) == ACL_IDTYPE_UID
}
/*
- * Check privileges granted via groups. We do this in a separate
- * pass to minimize expensive lookups in pg_group.
+ * Check privileges granted via groups. We do this in a separate pass
+ * to minimize expensive lookups in pg_group.
*/
remaining = (mask & ~result);
for (i = 0; i < num; i++)
{
- AclItem *aidata = &aidat[i];
+ AclItem *aidata = &aidat[i];
if (ACLITEM_GET_IDTYPE(*aidata) == ACL_IDTYPE_GID
&& (aidata->ai_privs & remaining)
if (u_grantee == 0 && g_grantee == 0)
{
- aclitem->ai_grantee = ACL_ID_WORLD;
+ aclitem ->ai_grantee = ACL_ID_WORLD;
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_WORLD);
}
}
else if (u_grantee != 0)
{
- aclitem->ai_grantee = u_grantee;
+ aclitem ->ai_grantee = u_grantee;
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_UID);
}
- else /* (g_grantee != 0) */
+ else
+/* (g_grantee != 0) */
{
- aclitem->ai_grantee = g_grantee;
+ aclitem ->ai_grantee = g_grantee;
ACLITEM_SET_IDTYPE(*aclitem, ACL_IDTYPE_GID);
}
- aclitem->ai_grantor = grantor;
+ aclitem ->ai_grantor = grantor;
ACLITEM_SET_PRIVS(*aclitem, priv);
if (goption)
static Oid
convert_tablespace_name(text *tablespacename)
{
- char *spcname;
+ char *spcname;
Oid oid;
spcname = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(tablespacename)));
+ PointerGetDatum(tablespacename)));
oid = get_tablespace_oid(spcname);
if (!OidIsValid(oid))
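
Not part of the patch, for orientation only: the aclnewowner hunks above merely rewrap the comment describing its duplicate-merging pass (fold later entries for the same id into the first match, zero the stale masks in place, and compact in a single O(N^2) sweep). A minimal standalone sketch of that pass, using an invented Entry struct rather than the real AclItem layout:

#include <stdio.h>

typedef struct
{
    int         id;
    unsigned    rights;
} Entry;

static int
merge_duplicates(Entry *a, int n)
{
    int         dst = 0;

    for (int targ = 0; targ < n; targ++)
    {
        if (a[targ].rights == 0)
            continue;           /* already folded into an earlier entry */
        for (int src = targ + 1; src < n; src++)
        {
            if (a[src].id == a[targ].id)
            {
                a[targ].rights |= a[src].rights;
                a[src].rights = 0;      /* leave in place, but mark consumed */
            }
        }
        a[dst++] = a[targ];     /* emit the now duplicate-free entry */
    }
    return dst;                 /* new number of entries */
}

int
main(void)
{
    Entry       acl[] = {{10, 0x1}, {20, 0x2}, {10, 0x4}};
    int         n = merge_duplicates(acl, 3);

    for (int i = 0; i < n; i++)
        printf("%d: 0x%x\n", acl[i].id, acl[i].rights);     /* 10: 0x5, 20: 0x2 */
    return 0;
}
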
* Copyright (c) 2003, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.12 2003/11/29 19:51:57 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/array_userfuncs.c,v 1.13 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
else
ereport(ERROR,
(errcode(ERRCODE_DATA_EXCEPTION),
- errmsg("argument must be empty or one-dimensional array")));
+ errmsg("argument must be empty or one-dimensional array")));
/*
* We arrange to look up info about element type only once per series
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("cannot concatenate incompatible arrays"),
- errdetail("Arrays with differing element dimensions are "
- "not compatible for concatenation.")));
+ errdetail("Arrays with differing element dimensions are "
+ "not compatible for concatenation.")));
dims[i] = dims1[i];
lbs[i] = lbs1[i];
if (element_type == 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid array element type OID: %u", element_type)));
+ errmsg("invalid array element type OID: %u", element_type)));
if (ndims < 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.109 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/arrayfuncs.c,v 1.110 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int typlen, bool typbyval, char typalign);
static int array_cmp(FunctionCallInfo fcinfo);
static Datum array_type_length_coerce_internal(ArrayType *src,
- int32 desttypmod,
- bool isExplicit,
- FmgrInfo *fmgr_info);
+ int32 desttypmod,
+ bool isExplicit,
+ FmgrInfo *fmgr_info);
/*---------------------------------------------------------------------
if (ndim_braces != ndim)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
for (i = 0; i < ndim; ++i)
{
if (dim[i] != dim_braces[i])
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("array dimensions incompatible with array literal")));
+ errmsg("array dimensions incompatible with array literal")));
}
}
static int
ArrayCount(char *str, int *dim, char typdelim)
{
- int nest_level = 0,
- i;
- int ndim = 1,
- temp[MAXDIM],
- nelems[MAXDIM],
- nelems_last[MAXDIM];
- bool scanning_string = false;
- bool eoArray = false;
- bool empty_array = true;
- char *ptr;
- ArrayParseState parse_state = ARRAY_NO_LEVEL;
+ int nest_level = 0,
+ i;
+ int ndim = 1,
+ temp[MAXDIM],
+ nelems[MAXDIM],
+ nelems_last[MAXDIM];
+ bool scanning_string = false;
+ bool eoArray = false;
+ bool empty_array = true;
+ char *ptr;
+ ArrayParseState parse_state = ARRAY_NO_LEVEL;
for (i = 0; i < MAXDIM; ++i)
{
if (parse_state == ARRAY_ELEM_STARTED ||
parse_state == ARRAY_QUOTED_ELEM_STARTED)
empty_array = false;
-
+
switch (*ptr)
{
case '\0':
errmsg("malformed array literal: \"%s\"", str)));
break;
case '\\':
+
/*
* An escape must be after a level start, after an
- * element start, or after an element delimiter. In any
- * case we now must be past an element start.
+ * element start, or after an element delimiter. In
+ * any case we now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state != ARRAY_QUOTED_ELEM_STARTED)
parse_state = ARRAY_ELEM_STARTED;
/* skip the escaped character */
errmsg("malformed array literal: \"%s\"", str)));
break;
case '\"':
+
/*
* A quote must be after a level start, after a quoted
- * element start, or after an element delimiter. In any
- * case we now must be past an element start.
+ * element start, or after an element delimiter. In
+ * any case we now must be past an element start.
*/
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_QUOTED_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
scanning_string = !scanning_string;
if (scanning_string)
parse_state = ARRAY_QUOTED_ELEM_STARTED;
{
/*
* A left brace can occur if no nesting has
- * occurred yet, after a level start, or
- * after a level delimiter.
+ * occurred yet, after a level start, or after a
+ * level delimiter.
*/
if (parse_state != ARRAY_NO_LEVEL &&
parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_LEVEL_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_STARTED;
if (nest_level >= MAXDIM)
ereport(ERROR,
{
/*
* A right brace can occur after an element start,
- * an element completion, a quoted element completion,
- * or a level completion.
+ * an element completion, a quoted element
+ * completion, or a level completion.
*/
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED &&
- !(nest_level == 1 && parse_state == ARRAY_LEVEL_STARTED))
+ !(nest_level == 1 && parse_state == ARRAY_LEVEL_STARTED))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_LEVEL_COMPLETED;
if (nest_level == 0)
ereport(ERROR,
nest_level--;
if ((nelems_last[nest_level] != 1) &&
- (nelems[nest_level] != nelems_last[nest_level]))
+ (nelems[nest_level] != nelems_last[nest_level]))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("multidimensional arrays must have "
- "array expressions with matching "
- "dimensions")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("multidimensional arrays must have "
+ "array expressions with matching "
+ "dimensions")));
nelems_last[nest_level] = nelems[nest_level];
nelems[nest_level] = 1;
if (nest_level == 0)
if (*ptr == typdelim)
{
/*
- * Delimiters can occur after an element start,
- * an element completion, a quoted element
- * completion, or a level completion.
- */
+ * Delimiters can occur after an element
+ * start, an element completion, a quoted
+ * element completion, or a level completion.
+ */
if (parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_COMPLETED &&
- parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
+ parse_state != ARRAY_QUOTED_ELEM_COMPLETED &&
parse_state != ARRAY_LEVEL_COMPLETED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
if (parse_state == ARRAY_LEVEL_COMPLETED)
parse_state = ARRAY_LEVEL_DELIMITED;
else
else if (!isspace(*ptr))
{
/*
- * Other non-space characters must be after a level
- * start, after an element start, or after an element
- * delimiter. In any case we now must be past an
- * element start.
- */
+ * Other non-space characters must be after a
+ * level start, after an element start, or
+ * after an element delimiter. In any case we
+ * now must be past an element start.
+ */
if (parse_state != ARRAY_LEVEL_STARTED &&
parse_state != ARRAY_ELEM_STARTED &&
parse_state != ARRAY_ELEM_DELIMITED)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
parse_state = ARRAY_ELEM_STARTED;
}
}
temp[ndim - 1]++;
ptr++;
}
-
+
/* only whitespace is allowed after the closing brace */
while (*ptr)
{
if (!isspace(*ptr++))
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed array literal: \"%s\"", str)));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed array literal: \"%s\"", str)));
}
-
+
/* special case for an empty array */
if (empty_array)
return 0;
-
+
for (i = 0; i < ndim; ++i)
dim[i] = temp[i];
if (scanning_string)
{
itemquoted = true;
- /* Crunch the string on top of the first quote. */
+
+ /*
+ * Crunch the string on top of the first
+ * quote.
+ */
for (cptr = ptr; *cptr != '\0'; cptr++)
*cptr = *(cptr + 1);
/* Back up to not miss following character. */
*tmp,
*retval,
**values,
- /*
- * 33 per dim since we assume 15 digits per number + ':' +'[]'
- *
- * +2 allows for assignment operator + trailing null
- */
+
+ /*
+ * 33 per dim since we assume 15 digits per number + ':' +'[]'
+ *
+ * +2 allows for assignment operator + trailing null
+ */
dims_str[(MAXDIM * 33) + 2];
bool *needquotes,
needdims = false;
}
/*
- * we will need to add explicit dimensions if any dimension
- * has a lower bound other than one
+ * we will need to add explicit dimensions if any dimension has a
+ * lower bound other than one
*/
for (i = 0; i < ndim; i++)
{
itemvalue = fetch_att(p, typbyval, typlen);
values[i] = DatumGetCString(FunctionCall3(&my_extra->proc,
itemvalue,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(-1)));
p = att_addlength(p, typlen, PointerGetDatum(p));
p = (char *) att_align(p, typalign);
/* add explicit dimensions if required */
if (needdims)
{
- char *ptr = dims_str;
+ char *ptr = dims_str;
for (i = 0; i < ndim; i++)
{
outputbytes = DatumGetByteaP(FunctionCall2(&my_extra->proc,
itemvalue,
- ObjectIdGetDatum(typioparam)));
+ ObjectIdGetDatum(typioparam)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
pq_sendbytes(&buf, VARDATA(outputbytes),
/*
* We arrange to look up the equality function only once per
* series of calls, assuming the element type doesn't change
- * underneath us. The typcache is used so that we have no
- * memory leakage when being used as an index support function.
+ * underneath us. The typcache is used so that we have no memory
+ * leakage when being used as an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
errmsg("cannot compare arrays of different element types")));
/*
- * We arrange to look up the comparison function only once per series of
- * calls, assuming the element type doesn't change underneath us.
- * The typcache is used so that we have no memory leakage when being used
- * as an index support function.
+ * We arrange to look up the comparison function only once per series
+ * of calls, assuming the element type doesn't change underneath us.
+ * The typcache is used so that we have no memory leakage when being
+ * used as an index support function.
*/
typentry = (TypeCacheEntry *) fcinfo->flinfo->fn_extra;
if (typentry == NULL ||
if (!OidIsValid(typentry->cmp_proc_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify a comparison function for type %s",
- format_type_be(element_type))));
+ errmsg("could not identify a comparison function for type %s",
+ format_type_be(element_type))));
fcinfo->flinfo->fn_extra = (void *) typentry;
}
typlen = typentry->typlen;
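
Illustration only, not from the patch: the ArrayCount hunks above reflow comments describing a parse-state machine in which each character class of an array literal is legal only after certain states. A toy standalone checker in the same spirit; it collapses several of the real states and ignores quoting, escapes, and dimension matching, so it is considerably more permissive than the backend's parser:

#include <stdbool.h>
#include <stdio.h>

typedef enum
{
    NO_LEVEL,
    LEVEL_STARTED,
    ELEM_STARTED,
    ELEM_DELIMITED,
    LEVEL_COMPLETED
} State;

static bool
check_array_literal(const char *s)
{
    State       st = NO_LEVEL;
    int         depth = 0;

    for (; *s; s++)
    {
        if (*s == '{')
        {
            /* a level can open before any nesting, after a level start,
             * or after a delimiter */
            if (st != NO_LEVEL && st != LEVEL_STARTED && st != ELEM_DELIMITED)
                return false;
            depth++;
            st = LEVEL_STARTED;
        }
        else if (*s == '}')
        {
            /* a level can close after an element or a completed sublevel,
             * or immediately for an empty outermost array */
            if (st != ELEM_STARTED && st != LEVEL_COMPLETED &&
                !(depth == 1 && st == LEVEL_STARTED))
                return false;
            if (--depth < 0)
                return false;
            st = LEVEL_COMPLETED;
        }
        else if (*s == ',')
        {
            if (st != ELEM_STARTED && st != LEVEL_COMPLETED)
                return false;
            st = ELEM_DELIMITED;
        }
        else if (*s != ' ')
        {
            /* ordinary element characters */
            if (st != LEVEL_STARTED && st != ELEM_STARTED &&
                st != ELEM_DELIMITED)
                return false;
            st = ELEM_STARTED;
        }
    }
    return depth == 0 && st == LEVEL_COMPLETED;
}

int
main(void)
{
    printf("%d %d %d\n",
           check_array_literal("{{1,2},{3,4}}"),    /* 1: well formed */
           check_array_literal("{1,,2}"),           /* 0: empty element */
           check_array_literal("{1}2"));            /* 0: junk after close */
    return 0;
}
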
* Portions Copyright (c) 1999-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.21 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ascii.c,v 1.22 2004/08/29 05:06:49 momjian Exp $
*
*-----------------------------------------------------------------------
*/
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("encoding conversion from %s to ASCII not supported",
- pg_encoding_to_char(enc))));
+ errmsg("encoding conversion from %s to ASCII not supported",
+ pg_encoding_to_char(enc))));
return; /* keep compiler quiet */
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/bool.c,v 1.34 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/bool.c,v 1.35 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* function for standard EVERY aggregate implementation conforming to SQL 2003.
* must be strict. It is also named bool_and for homogeneity.
*/
-Datum booland_statefunc(PG_FUNCTION_ARGS)
+Datum
+booland_statefunc(PG_FUNCTION_ARGS)
{
PG_RETURN_BOOL(PG_GETARG_BOOL(0) && PG_GETARG_BOOL(1));
}
/* function for standard ANY/SOME aggregate conforming to SQL 2003.
* must be strict. The name of the aggregate is bool_or. See the doc.
*/
-Datum boolor_statefunc(PG_FUNCTION_ARGS)
+Datum
+boolor_statefunc(PG_FUNCTION_ARGS)
{
PG_RETURN_BOOL(PG_GETARG_BOOL(0) || PG_GETARG_BOOL(1));
}
* workings can be found in the book "Software Solutions in C" by
* Dale Schumacher, Academic Press, ISBN: 0-12-632360-7.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.63 2004/05/07 00:24:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/cash.c,v 1.64 2004/08/29 05:06:49 momjian Exp $
*/
#include "postgres.h"
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type money: \"%s\"", str)));
+ errmsg("invalid input syntax for type money: \"%s\"", str)));
result = (value * sgn);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.101 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/date.c,v 1.102 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char *str = PG_GETARG_CSTRING(0);
DateADT date;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tzp;
int dtype;
{
DateADT date = PG_GETARG_DATEADT(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
date2timestamptz(DateADT dateVal)
{
TimestampTz result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
date_eq_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_ne_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_lt_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_gt_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_le_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_ge_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
date_cmp_timestamptz(PG_FUNCTION_ARGS)
{
DateADT dateVal = PG_GETARG_DATEADT(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = date2timestamptz(dateVal);
Datum
timestamptz_eq_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_ne_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_lt_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_gt_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_le_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_ge_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
Datum
timestamptz_cmp_date(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
DateADT dateVal = PG_GETARG_DATEADT(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = date2timestamptz(dateVal);
{
Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
{
TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
DateADT result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
{
AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
DateADT result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
int32 typmod = PG_GETARG_INT32(2);
TimeADT result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
int nf;
{
TimeADT time = PG_GETARG_TIMEADT(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char buf[MAXDATELEN + 1];
{
Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
TimeADT result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
{
TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
TimeADT result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
fsec_t fsec;
if (type == UNITS)
{
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
time2tm(time, tm, &fsec);
int32 typmod = PG_GETARG_INT32(2);
TimeTzADT *result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
int nf;
{
TimeTzADT *time = PG_GETARG_TIMETZADT_P(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
{
TimeADT time = PG_GETARG_TIMEADT(0);
TimeTzADT *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
{
TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
TimeTzADT *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
fsec_t fsec;
if (VARSIZE(str) - VARHDRSZ > MAXDATELEN)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("invalid input syntax for type time with time zone: \"%s\"",
- VARDATA(str))));
+ errmsg("invalid input syntax for type time with time zone: \"%s\"",
+ VARDATA(str))));
sp = VARDATA(str);
dp = dstr;
double dummy;
int tz;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
timetz2tm(time, tm, &fsec, &tz);
{
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("\"time with time zone\" units \"%s\" not recognized",
- DatumGetCString(DirectFunctionCall1(textout,
+ errmsg("\"time with time zone\" units \"%s\" not recognized",
+ DatumGetCString(DirectFunctionCall1(textout,
PointerGetDatum(units))))));
result = 0;
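
Aside, not part of the patch: every date-versus-timestamptz operator above follows the same shape, promote the DateADT operand with date2timestamptz() and then compare two timestamps. A standalone sketch of that promote-then-compare shape with invented types; unlike the backend it ignores the session time zone entirely:

#include <stdint.h>
#include <stdio.h>

typedef int32_t DateDays;       /* days since some epoch */
typedef int64_t TsMicros;       /* microseconds since the same epoch */

static TsMicros
promote(DateDays d)
{
    /* midnight of that day, in the higher-resolution type */
    return (TsMicros) d * INT64_C(86400) * INT64_C(1000000);
}

static int
cmp_date_ts(DateDays d, TsMicros ts)
{
    TsMicros    dt = promote(d);

    if (dt < ts)
        return -1;
    if (dt > ts)
        return 1;
    return 0;
}

int
main(void)
{
    /* day 1 (midnight) versus 25 hours after the epoch */
    printf("%d\n", cmp_date_ts(1, INT64_C(90000000000)));  /* prints -1 */
    return 0;
}
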
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.132 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/datetime.c,v 1.133 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
/*
* Check for valid day of month, now that we know for sure the
- * month and year. Note we don't use MD_FIELD_OVERFLOW here,
+ * month and year. Note we don't use MD_FIELD_OVERFLOW here,
* since it seems unlikely that "Feb 29" is a YMD-order error.
*/
if (tm->tm_mday > day_tab[isleap(tm->tm_year)][tm->tm_mon - 1])
* any faster than this code.
*/
int
-DetermineLocalTimeZone(struct pg_tm *tm)
+DetermineLocalTimeZone(struct pg_tm * tm)
{
int tz;
int date,
locsec,
delta1,
delta2;
- struct pg_tm *tx;
+ struct pg_tm *tx;
if (HasCTZSet)
{
/*
* First, generate the pg_time_t value corresponding to the given
- * y/m/d/h/m/s taken as GMT time. If this overflows, punt and
- * decide the timezone is GMT. (We only need to worry about overflow
- * on machines where pg_time_t is 32 bits.)
+ * y/m/d/h/m/s taken as GMT time. If this overflows, punt and decide
+ * the timezone is GMT. (We only need to worry about overflow on
+ * machines where pg_time_t is 32 bits.)
*/
if (!IS_VALID_JULIAN(tm->tm_year, tm->tm_mon, tm->tm_mday))
goto overflow;
goto overflow;
/*
- * Use pg_localtime to convert that pg_time_t to broken-down time,
- * and reassemble to get a representation of local time. (We could get
+ * Use pg_localtime to convert that pg_time_t to broken-down time, and
+ * reassemble to get a representation of local time. (We could get
* overflow of a few hours in the result, but the delta calculation
* should still work.)
*/
delta1 = mysec - locsec;
/*
- * However, if that GMT time and the local time we are
- * actually interested in are on opposite sides of a
- * daylight-savings-time transition, then this is not the time
- * offset we want. So, adjust the pg_time_t to be what we think
- * the GMT time corresponding to our target local time is, and
- * repeat the pg_localtime() call and delta calculation.
+ * However, if that GMT time and the local time we are actually
+ * interested in are on opposite sides of a daylight-savings-time
+ * transition, then this is not the time offset we want. So, adjust
+ * the pg_time_t to be what we think the GMT time corresponding to our
+ * target local time is, and repeat the pg_localtime() call and delta
+ * calculation.
*
* We have to watch out for overflow while adjusting the pg_time_t.
*/
/*
* We may have to do it again to get the correct delta.
*
- * It might seem we should just loop until we get the same delta
- * twice in a row, but if we've been given an "impossible" local
- * time (in the gap during a spring-forward transition) we'd never
- * get out of the loop. The behavior we want is that "impossible"
- * times are taken as standard time, and also that ambiguous times
- * (during a fall-back transition) are taken as standard time.
- * Therefore, we bias the code to prefer the standard-time solution.
+ * It might seem we should just loop until we get the same delta twice in
+ * a row, but if we've been given an "impossible" local time (in the
+ * gap during a spring-forward transition) we'd never get out of the
+ * loop. The behavior we want is that "impossible" times are taken as
+ * standard time, and also that ambiguous times (during a fall-back
+ * transition) are taken as standard time. Therefore, we bias the code
+ * to prefer the standard-time solution.
*/
if (delta2 != delta1 && tx->tm_isdst != 0)
{
mysec += delta2;
tx = pg_localtime(&mysec);
if (!tx)
- goto overflow; /* probably can't happen */
+ goto overflow; /* probably can't happen */
day = date2j(tx->tm_year + 1900, tx->tm_mon + 1, tx->tm_mday) -
UNIX_EPOCH_JDATE;
locsec = tx->tm_sec + (tx->tm_min + (day * 24 + tx->tm_hour) * 60) * 60;
case DTK_TIME:
/* previous field was "t" for ISO time */
dterr = DecodeNumberField(strlen(field[i]), field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
* fields later. Example: 20011223 or 040506
*/
dterr = DecodeNumberField(flen, field[i],
- (fmask | DTK_DATE_M),
+ (fmask | DTK_DATE_M),
&tmask, tm,
fsec, &is2digits);
if (dterr < 0)
/* timezone not specified? then find local timezone if possible */
if ((tzp != NULL) && (!(fmask & DTK_M(TZ))))
{
- struct pg_tm tt,
+ struct pg_tm tt,
*tmp = &tt;
/*
if (haveTextMonth)
{
/*
- * We are at the first numeric field of a date that included
- * a textual month name. We want to support the variants
- * MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as unambiguous
- * inputs. We will also accept MON-DD-YY or DD-MON-YY in
- * either DMY or MDY modes, as well as YY-MON-DD in YMD mode.
+ * We are at the first numeric field of a date that
+ * included a textual month name. We want to support the
+ * variants MON-DD-YYYY, DD-MON-YYYY, and YYYY-MON-DD as
+ * unambiguous inputs. We will also accept MON-DD-YY or
+ * DD-MON-YY in either DMY or MDY modes, as well as
+ * YY-MON-DD in YMD mode.
*/
if (flen >= 3 || DateOrder == DATEORDER_YMD)
{
if (flen >= 3 && *is2digits)
{
/* Guess that first numeric field is day was wrong */
- *tmask = DTK_M(DAY); /* YEAR is already set */
+ *tmask = DTK_M(DAY); /* YEAR is already set */
tm->tm_mday = tm->tm_year;
tm->tm_year = val;
*is2digits = FALSE;
}
/*
- * When processing a year field, mark it for adjustment if it's
- * only one or two digits.
+ * When processing a year field, mark it for adjustment if it's only
+ * one or two digits.
*/
if (*tmask == DTK_M(YEAR))
*is2digits = (flen <= 2);
*/
static int
DecodeNumberField(int len, char *str, int fmask,
- int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
+ int *tmask, struct pg_tm * tm, fsec_t *fsec, int *is2digits)
{
char *cp;
while ((*cp != '\0') && (*cp != ':') && (*cp != '.'))
cp++;
if ((*cp == ':') &&
- (DecodeTime(field[i] + 1, fmask, &tmask, tm, fsec) == 0))
+ (DecodeTime(field[i] + 1, fmask, &tmask, tm, fsec) == 0))
{
if (*field[i] == '-')
{
break;
case DTERR_TZDISP_OVERFLOW:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
- errmsg("time zone displacement out of range: \"%s\"",
- str)));
+ (errcode(ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE),
+ errmsg("time zone displacement out of range: \"%s\"",
+ str)));
break;
case DTERR_BAD_FORMAT:
default:
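
Illustration only: the DetermineLocalTimeZone comments above describe guessing the GMT equivalent of a local wall-clock time and then correcting the guess by the observed offset, with a further pass that biases impossible or ambiguous local times toward standard time. A standalone sketch of the first two passes against <time.h>; the standard-time bias pass and the overflow checks are omitted, and timegm() (a glibc/BSD extension) stands in for the backend's hand-rolled date2j() arithmetic:

#include <stdio.h>
#include <time.h>

/* seconds west of Greenwich for the given local wall-clock time */
static long
local_offset_west(struct tm want)
{
    time_t      guess;
    struct tm   got;
    long        delta1,
                delta2;

    want.tm_isdst = -1;
    guess = timegm(&want);              /* wanted local time taken as GMT */

    localtime_r(&guess, &got);          /* what local time is that instant? */
    delta1 = (long) (guess - timegm(&got));

    guess += delta1;                    /* corrected GMT guess */
    localtime_r(&guess, &got);          /* recompute in case we crossed DST */
    delta2 = (long) (guess - timegm(&got));

    return delta2;
}

int
main(void)
{
    struct tm   t = {0};

    t.tm_year = 104;                    /* 2004 */
    t.tm_mon = 6;                       /* July */
    t.tm_mday = 1;
    t.tm_hour = 12;
    printf("%ld seconds west of GMT\n", local_offset_west(t));
    return 0;
}
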
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.12 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/encode.c,v 1.13 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (s >= srcend)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid hexadecimal data: odd number of digits")));
+ errmsg("invalid hexadecimal data: odd number of digits")));
v2 = get_hex(*s++);
*p++ = v1 | v2;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.108 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/float.c,v 1.109 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void CheckFloat8Val(double val);
static int float4_cmp_internal(float4 a, float4 b);
static int float8_cmp_internal(float8 a, float8 b);
+
#ifndef HAVE_CBRT
static double cbrt(double x);
#endif /* HAVE_CBRT */
/* C99 standard way */
return (double) INFINITY;
#else
+
/*
* On some platforms, HUGE_VAL is an infinity, elsewhere it's just the
- * largest normal double. We assume forcing an overflow will get us
- * a true infinity.
+ * largest normal double. We assume forcing an overflow will get us a
+ * true infinity.
*/
return (double) (HUGE_VAL * HUGE_VAL);
#endif
/* C99 standard way */
return (float) INFINITY;
#else
+
/*
* On some platforms, HUGE_VAL is an infinity, elsewhere it's just the
- * largest normal double. We assume forcing an overflow will get us
- * a true infinity.
+ * largest normal double. We assume forcing an overflow will get us a
+ * true infinity.
*/
return (float) (HUGE_VAL * HUGE_VAL);
#endif
int
is_infinite(double val)
{
- int inf = isinf(val);
+ int inf = isinf(val);
if (inf == 0)
return 0;
/*
* endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to
- * the original input string.
+ * recognized as a valid floating point number. orig_num points to the
+ * original input string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid
- * the vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the
+ * vagaries of strtod() on different platforms.
*
- * In releases prior to 8.0, we accepted an empty string as valid
- * input (yielding a float4 of 0). In 8.0, we accept empty
- * strings, but emit a warning noting that the feature is
- * deprecated. In 8.1+, the warning should be replaced by an
- * error.
+ * In releases prior to 8.0, we accepted an empty string as valid input
+ * (yielding a float4 of 0). In 8.0, we accept empty strings, but emit
+ * a warning noting that the feature is deprecated. In 8.1+, the
+ * warning should be replaced by an error.
*/
if (*num == '\0')
{
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but
- * not all platforms support that yet (and some accept them but
- * set ERANGE anyway...) Therefore, we check for these inputs
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not
+ * all platforms support that yet (and some accept them but set
+ * ERANGE anyway...) Therefore, we check for these inputs
* ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
}
else if (pg_strncasecmp(num, "-Infinity", 9) == 0)
{
- val = - get_float4_infinity();
+ val = -get_float4_infinity();
endptr = num + 9;
}
else if (errno == ERANGE)
{
/*
* Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given
- * "inf" or "infinity".
+ * to point one byte beyond the end of the string when given "inf"
+ * or "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
}
-#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
+#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
/* skip trailing whitespace */
while (*endptr != '\0' && isspace((unsigned char) *endptr))
strcpy(ascii, "-Infinity");
break;
default:
- {
- int ndig = FLT_DIG + extra_float_digits;
- if (ndig < 1)
- ndig = 1;
+ {
+ int ndig = FLT_DIG + extra_float_digits;
- sprintf(ascii, "%.*g", ndig, num);
- }
+ if (ndig < 1)
+ ndig = 1;
+
+ sprintf(ascii, "%.*g", ndig, num);
+ }
}
PG_RETURN_CSTRING(ascii);
/*
* endptr points to the first character _after_ the sequence we
- * recognized as a valid floating point number. orig_num points to
- * the original input string.
+ * recognized as a valid floating point number. orig_num points to the
+ * original input string.
*/
orig_num = num;
/*
- * Check for an empty-string input to begin with, to avoid
- * the vagaries of strtod() on different platforms.
+ * Check for an empty-string input to begin with, to avoid the
+ * vagaries of strtod() on different platforms.
*
- * In releases prior to 8.0, we accepted an empty string as valid
- * input (yielding a float8 of 0). In 8.0, we accept empty
- * strings, but emit a warning noting that the feature is
- * deprecated. In 8.1+, the warning should be replaced by an
- * error.
+ * In releases prior to 8.0, we accepted an empty string as valid input
+ * (yielding a float8 of 0). In 8.0, we accept empty strings, but emit
+ * a warning noting that the feature is deprecated. In 8.1+, the
+ * warning should be replaced by an error.
*/
if (*num == '\0')
{
ereport(WARNING,
(errcode(ERRCODE_WARNING_DEPRECATED_FEATURE),
- errmsg("deprecated input syntax for type double precision: \"\""),
+ errmsg("deprecated input syntax for type double precision: \"\""),
errdetail("This input will be rejected in "
"a future release of PostgreSQL.")));
PG_RETURN_FLOAT8(0.0);
if (endptr == num || errno != 0)
{
/*
- * C99 requires that strtod() accept NaN and [-]Infinity, but
- * not all platforms support that yet (and some accept them but
- * set ERANGE anyway...) Therefore, we check for these inputs
+ * C99 requires that strtod() accept NaN and [-]Infinity, but not
+ * all platforms support that yet (and some accept them but set
+ * ERANGE anyway...) Therefore, we check for these inputs
* ourselves.
*/
if (pg_strncasecmp(num, "NaN", 3) == 0)
}
else if (pg_strncasecmp(num, "-Infinity", 9) == 0)
{
- val = - get_float8_infinity();
+ val = -get_float8_infinity();
endptr = num + 9;
}
else if (errno == ERANGE)
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("\"%s\" is out of range for type double precision",
- orig_num)));
+ errmsg("\"%s\" is out of range for type double precision",
+ orig_num)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
{
/*
* Many versions of Solaris have a bug wherein strtod sets endptr
- * to point one byte beyond the end of the string when given
- * "inf" or "infinity".
+ * to point one byte beyond the end of the string when given "inf"
+ * or "infinity".
*/
if (endptr != num && endptr[-1] == '\0')
endptr--;
}
-#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
+#endif /* HAVE_BUGGY_SOLARIS_STRTOD */
/* skip trailing whitespace */
while (*endptr != '\0' && isspace((unsigned char) *endptr))
if (*endptr != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- orig_num)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ orig_num)));
if (!isinf(val))
CheckFloat8Val(val);
strcpy(ascii, "-Infinity");
break;
default:
- {
- int ndig = DBL_DIG + extra_float_digits;
- if (ndig < 1)
- ndig = 1;
+ {
+ int ndig = DBL_DIG + extra_float_digits;
- sprintf(ascii, "%.*g", ndig, num);
- }
+ if (ndig < 1)
+ ndig = 1;
+
+ sprintf(ascii, "%.*g", ndig, num);
+ }
}
PG_RETURN_CSTRING(ascii);
float8 result;
/*
- * The SQL spec requires that we emit a particular SQLSTATE error
- * code for certain error conditions.
+ * The SQL spec requires that we emit a particular SQLSTATE error code
+ * for certain error conditions.
*/
if ((arg1 == 0 && arg2 < 0) ||
(arg1 < 0 && floor(arg2) != arg2))
float8 result;
/*
- * Emit particular SQLSTATE error codes for ln(). This is required
- * by the SQL standard.
+ * Emit particular SQLSTATE error codes for ln(). This is required by
+ * the SQL standard.
*/
if (arg1 == 0.0)
ereport(ERROR,
/*
* Emit particular SQLSTATE error codes for log(). The SQL spec
- * doesn't define log(), but it does define ln(), so it makes
- * sense to emit the same error code for an analogous error
- * condition.
+ * doesn't define log(), but it does define ln(), so it makes sense to
+ * emit the same error code for an analogous error condition.
*/
if (arg1 == 0.0)
ereport(ERROR,
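
Aside, not from the patch: the float8in comments above explain that NaN and [-]Infinity are recognized by hand because not every platform's strtod() accepts them. A standalone sketch of that fallback; it drops the ERANGE, empty-string, and Solaris-endptr special cases the backend handles, and uses POSIX strncasecmp() where the backend uses pg_strncasecmp():

#include <errno.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

/* returns 0 on success, -1 on bad syntax; out-of-range handling is omitted */
static int
parse_float8(const char *num, double *val)
{
    char       *endptr;

    errno = 0;
    *val = strtod(num, &endptr);
    if (endptr == num || errno != 0)
    {
        /* recognize the special spellings ourselves */
        if (strncasecmp(num, "NaN", 3) == 0)
        {
            *val = NAN;
            endptr = (char *) num + 3;
        }
        else if (strncasecmp(num, "Infinity", 8) == 0)
        {
            *val = HUGE_VAL;
            endptr = (char *) num + 8;
        }
        else if (strncasecmp(num, "-Infinity", 9) == 0)
        {
            *val = -HUGE_VAL;
            endptr = (char *) num + 9;
        }
        else
            return -1;
    }
    while (*endptr == ' ')              /* the backend also skips trailing space */
        endptr++;
    return (*endptr == '\0') ? 0 : -1;
}

int
main(void)
{
    double      v;

    printf("%d %g\n", parse_float8("-Infinity", &v), v);
    printf("%d %g\n", parse_float8("12.5e1", &v), v);
    return 0;
}
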
/* -----------------------------------------------------------------------
* formatting.c
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.76 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/formatting.c,v 1.77 2004/08/29 05:06:49 momjian Exp $
*
*
* Portions Copyright (c) 1999-2004, PostgreSQL Global Development Group
*/
typedef struct TmToChar
{
- struct pg_tm tm; /* classic 'tm' struct */
+ struct pg_tm tm; /* classic 'tm' struct */
fsec_t fsec; /* fractional seconds */
char *tzn; /* timezone */
} TmToChar;
static int dch_time(int arg, char *inout, int suf, int flag, FormatNode *node, void *data);
static int dch_date(int arg, char *inout, int suf, int flag, FormatNode *node, void *data);
static void do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec);
+ struct pg_tm * tm, fsec_t *fsec);
static char *fill_str(char *str, int c, int max);
static FormatNode *NUM_cache(int len, NUMDesc *Num, char *pars_str, bool *shouldFree);
static char *int_to_roman(int number);
for (n = node, s = inout; n->type != NODE_TYPE_END; n++)
{
- if (flag == FROM_CHAR && *s=='\0')
+ if (flag == FROM_CHAR && *s == '\0')
+
/*
- * The input string is shorter than format picture,
- * so it's good time to break this loop...
- *
- * Note: this isn't relevant for TO_CHAR mode, beacuse
- * it use 'inout' allocated by format picture length.
+ * The input string is shorter than format picture, so it's
+ * good time to break this loop...
+ *
+ * Note: this isn't relevant for TO_CHAR mode, beacuse it use
+ * 'inout' allocated by format picture length.
*/
break;
}
}
}
-
+
++s; /* ! */
}
dch_time(int arg, char *inout, int suf, int flag, FormatNode *node, void *data)
{
char *p_inout = inout;
- struct pg_tm *tm = NULL;
+ struct pg_tm *tm = NULL;
TmFromChar *tmfc = NULL;
TmToChar *tmtc = NULL;
*p_inout;
int i,
len;
- struct pg_tm *tm = NULL;
+ struct pg_tm *tm = NULL;
TmFromChar *tmfc = NULL;
TmToChar *tmtc = NULL;
{
if (tm->tm_year <= 9999 && tm->tm_year >= -9998)
sprintf(inout, "%0*d",
- S_FM(suf) ? 0 : 4,
- arg == DCH_YYYY ?
- YEAR_ABS(tm->tm_year) :
- YEAR_ABS(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday)));
+ S_FM(suf) ? 0 : 4,
+ arg == DCH_YYYY ?
+ YEAR_ABS(tm->tm_year) :
+ YEAR_ABS(date2isoyear(
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday)));
else
sprintf(inout, "%d",
- arg == DCH_YYYY ?
- YEAR_ABS(tm->tm_year) :
- YEAR_ABS(date2isoyear(
- tm->tm_year,
- tm->tm_mon,
- tm->tm_mday)));
+ arg == DCH_YYYY ?
+ YEAR_ABS(tm->tm_year) :
+ YEAR_ABS(date2isoyear(
+ tm->tm_year,
+ tm->tm_mon,
+ tm->tm_mday)));
if (S_THth(suf))
str_numth(p_inout, inout, S_TH_TYPE(suf));
return strlen(p_inout) - 1;
if (flag == TO_CHAR)
{
snprintf(buff, sizeof(buff), "%03d",
- arg == DCH_YYY ?
- YEAR_ABS(tm->tm_year) :
- YEAR_ABS(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday)));
+ arg == DCH_YYY ?
+ YEAR_ABS(tm->tm_year) :
+ YEAR_ABS(date2isoyear(tm->tm_year,
+ tm->tm_mon, tm->tm_mday)));
i = strlen(buff);
strcpy(inout, buff + (i - 3));
if (S_THth(suf))
if (flag == TO_CHAR)
{
snprintf(buff, sizeof(buff), "%02d",
- arg == DCH_YY ?
- YEAR_ABS(tm->tm_year) :
- YEAR_ABS(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday)));
+ arg == DCH_YY ?
+ YEAR_ABS(tm->tm_year) :
+ YEAR_ABS(date2isoyear(tm->tm_year,
+ tm->tm_mon, tm->tm_mday)));
i = strlen(buff);
strcpy(inout, buff + (i - 2));
if (S_THth(suf))
if (flag == TO_CHAR)
{
snprintf(buff, sizeof(buff), "%1d",
- arg == DCH_Y ?
- YEAR_ABS(tm->tm_year) :
- YEAR_ABS(date2isoyear(tm->tm_year,
- tm->tm_mon, tm->tm_mday)));
+ arg == DCH_Y ?
+ YEAR_ABS(tm->tm_year) :
+ YEAR_ABS(date2isoyear(tm->tm_year,
+ tm->tm_mon, tm->tm_mday)));
i = strlen(buff);
strcpy(inout, buff + (i - 1));
if (S_THth(suf))
return ent;
}
- return NULL; /* never */
+ return NULL; /* never */
}
static DCHCacheEntry *
datetime_to_char_body(TmToChar *tmtc, text *fmt)
{
FormatNode *format;
- struct pg_tm *tm = NULL;
+ struct pg_tm *tm = NULL;
char *fmt_str,
- *result;
- bool incache;
- int fmt_len = VARSIZE(fmt) - VARHDRSZ;
+ *result;
+ bool incache;
+ int fmt_len = VARSIZE(fmt) - VARHDRSZ;
tm = tmtcTm(tmtc);
tm->tm_wday = (date2j(tm->tm_year, tm->tm_mon, tm->tm_mday) + 1) % 7;
/*
* Allocate new memory if format picture is bigger than static cache
- * and not use cache (call parser always)
+ * and not use cache (call parser always)
*/
if (fmt_len > DCH_CACHE_SIZE)
{
parse_format(format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
- (format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
+ (format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
}
else
* Use cache buffers
*/
DCHCacheEntry *ent;
+
incache = TRUE;
if ((ent = DCH_cache_search(fmt_str)) == NULL)
parse_format(ent->format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
- (ent->format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
+ (ent->format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
#ifdef DEBUG_TO_FROM_CHAR
/* dump_node(ent->format, fmt_len); */
*/
if (result && *result)
{
- int len = strlen(result);
-
+ int len = strlen(result);
+
if (len)
{
text *res = (text *) palloc(len + 1 + VARHDRSZ);
text *fmt = PG_GETARG_TEXT_P(1);
Timestamp result;
int tz;
- struct pg_tm tm;
+ struct pg_tm tm;
fsec_t fsec;
do_to_timestamp(date_txt, fmt, &tm, &fsec);
text *date_txt = PG_GETARG_TEXT_P(0);
text *fmt = PG_GETARG_TEXT_P(1);
DateADT result;
- struct pg_tm tm;
+ struct pg_tm tm;
fsec_t fsec;
do_to_timestamp(date_txt, fmt, &tm, &fsec);
*/
static void
do_to_timestamp(text *date_txt, text *fmt,
- struct pg_tm *tm, fsec_t *fsec)
+ struct pg_tm * tm, fsec_t *fsec)
{
FormatNode *format;
TmFromChar tmfc;
- int fmt_len;
+ int fmt_len;
ZERO_tm(tm);
*fsec = 0;
if (fmt_len)
{
- int date_len;
- char *fmt_str;
- char *date_str;
- bool incache;
-
+ int date_len;
+ char *fmt_str;
+ char *date_str;
+ bool incache;
+
fmt_str = (char *) palloc(fmt_len + 1);
memcpy(fmt_str, VARDATA(fmt), fmt_len);
*(fmt_str + fmt_len) = '\0';
parse_format(format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
- (format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
+ (format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
}
else
{
* Use cache buffers
*/
DCHCacheEntry *ent;
+
incache = TRUE;
if ((ent = DCH_cache_search(fmt_str)) == NULL)
parse_format(ent->format, fmt_str, DCH_keywords,
DCH_suff, DCH_index, DCH_TYPE, NULL);
- (ent->format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
+ (ent->format + fmt_len)->type = NODE_TYPE_END; /* Paranoia? */
#ifdef DEBUG_TO_FROM_CHAR
/* dump_node(ent->format, fmt_len); */
/* dump_index(DCH_keywords, DCH_index); */
if (!tm->tm_year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_DATETIME_FORMAT),
- errmsg("cannot calculate day of year without year information")));
+ errmsg("cannot calculate day of year without year information")));
y = ysum[isleap(tm->tm_year)];
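
Illustration only: the datetime_to_char_body and do_to_timestamp hunks above keep the rule that a format picture longer than the static cache is parsed into throwaway memory while shorter ones are parsed once into a cache slot and reused. A standalone caricature of that decision with a single invented cache slot:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CACHE_SIZE 64                   /* stands in for DCH_CACHE_SIZE */

static char cache_key[CACHE_SIZE + 1];
static char cache_parsed[CACHE_SIZE + 1];   /* stands in for the FormatNode array */
static bool cache_valid = false;

static void
parse_format(char *dst, const char *src)
{
    strcpy(dst, src);                   /* a real parser would build nodes here */
}

/* returns the parsed picture; *incache says whether the caller must free it */
static char *
get_format(const char *fmt, bool *incache)
{
    size_t      len = strlen(fmt);

    if (len > CACHE_SIZE)
    {
        char       *fresh = malloc(len + 1);

        parse_format(fresh, fmt);
        *incache = false;               /* too big for the cache: caller frees */
        return fresh;
    }
    if (!cache_valid || strcmp(cache_key, fmt) != 0)
    {
        strcpy(cache_key, fmt);         /* cache miss: parse into the slot */
        parse_format(cache_parsed, fmt);
        cache_valid = true;
    }
    *incache = true;
    return cache_parsed;
}

int
main(void)
{
    bool        incache;
    char       *fmt = get_format("YYYY-MM-DD", &incache);

    printf("%s (cached: %d)\n", fmt, incache);
    if (!incache)
        free(fmt);
    return 0;
}
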
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.86 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/geo_ops.c,v 1.87 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type box: \"%s\"", str)));
+ errmsg("invalid input syntax for type box: \"%s\"", str)));
/* reorder corners if necessary... */
if (box->high.x < box->low.x)
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type line: \"%s\"", str)));
+ errmsg("invalid input syntax for type line: \"%s\"", str)));
line = (LINE *) palloc(sizeof(LINE));
line_construct_pts(line, &lseg.p[0], &lseg.p[1]);
Datum
path_area(PG_FUNCTION_ARGS)
{
- PATH *path = PG_GETARG_PATH_P(0);
- double area = 0.0;
- int i,j;
+ PATH *path = PG_GETARG_PATH_P(0);
+ double area = 0.0;
+ int i,
+ j;
if (!path->closed)
PG_RETURN_NULL();
- for (i = 0; i < path->npts; i++) {
+ for (i = 0; i < path->npts; i++)
+ {
j = (i + 1) % path->npts;
area += path->p[i].x * path->p[j].y;
area -= path->p[i].y * path->p[j].x;
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
s = str;
while (isspace((unsigned char) *s))
&& (!((depth == 0) && (*s == '\0'))) && !((depth >= 1) && (*s == RDELIM)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type path: \"%s\"", str)));
+ errmsg("invalid input syntax for type path: \"%s\"", str)));
path->closed = (!isopen);
if (npts < 0 || npts >= (int32) ((INT_MAX - offsetof(PATH, p[0])) / sizeof(Point)))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid number of points in external \"path\" value")));
+ errmsg("invalid number of points in external \"path\" value")));
size = offsetof(PATH, p[0]) +sizeof(path->p[0]) * npts;
path = (PATH *) palloc(size);
if (!pair_decode(str, &x, &y, &s) || (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type point: \"%s\"", str)));
+ errmsg("invalid input syntax for type point: \"%s\"", str)));
point = (Point *) palloc(sizeof(Point));
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type lseg: \"%s\"", str)));
+ errmsg("invalid input syntax for type lseg: \"%s\"", str)));
#ifdef NOT_USED
lseg->m = point_sl(&lseg->p[0], &lseg->p[1]);
if ((npts = pair_count(str, ',')) <= 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
size = offsetof(POLYGON, p[0]) +sizeof(poly->p[0]) * npts;
poly = (POLYGON *) palloc0(size); /* zero any holes */
|| (*s != '\0'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type polygon: \"%s\"", str)));
+ errmsg("invalid input syntax for type polygon: \"%s\"", str)));
make_bound_box(poly);
if (!pair_decode(s, &circle->center.x, &circle->center.y, &s))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
if (*s == DELIM)
s++;
if ((!single_decode(s, &circle->radius, &s)) || (circle->radius < 0))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
while (depth > 0)
{
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
}
if (*s != '\0')
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type circle: \"%s\"", str)));
+ errmsg("invalid input syntax for type circle: \"%s\"", str)));
PG_RETURN_CIRCLE_P(circle);
}
if (FPzero(circle->radius))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot convert circle with radius zero to polygon")));
+ errmsg("cannot convert circle with radius zero to polygon")));
if (npts < 2)
ereport(ERROR,
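
Aside, not part of the patch: the new path_area loop above accumulates x_i*y_j - y_i*x_j over successive vertex pairs, i.e. the shoelace sum; half of its absolute value is the enclosed area of the closed path. A standalone version of the same computation:

#include <math.h>
#include <stdio.h>

typedef struct
{
    double      x,
                y;
} Pt;

/* shoelace sum over consecutive vertices, halved; fabs() gives the area */
static double
polygon_area(const Pt *p, int npts)
{
    double      area = 0.0;

    for (int i = 0; i < npts; i++)
    {
        int         j = (i + 1) % npts; /* wrap around to close the path */

        area += p[i].x * p[j].y - p[i].y * p[j].x;
    }
    return fabs(area) / 2.0;
}

int
main(void)
{
    Pt          square[] = {{0, 0}, {4, 0}, {4, 3}, {0, 3}};

    printf("%g\n", polygon_area(square, 4));    /* prints 12 */
    return 0;
}
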
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.61 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int.c,v 1.62 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int32 current;
int32 finish;
int32 step;
-} generate_series_fctx;
+} generate_series_fctx;
/*****************************************************************************
* USER I/O ROUTINES *
Datum
generate_series_step_int4(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- generate_series_fctx *fctx;
- int32 result;
- MemoryContext oldcontext;
+ FuncCallContext *funcctx;
+ generate_series_fctx *fctx;
+ int32 result;
+ MemoryContext oldcontext;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- int32 start = PG_GETARG_INT32(0);
- int32 finish = PG_GETARG_INT32(1);
- int32 step = 1;
+ int32 start = PG_GETARG_INT32(0);
+ int32 finish = PG_GETARG_INT32(1);
+ int32 step = 1;
/* see if we were given an explicit step size */
if (PG_NARGS() == 3)
fctx = (generate_series_fctx *) palloc(sizeof(generate_series_fctx));
/*
- * Use fctx to keep state from call to call.
- * Seed current with the original start value
+ * Use fctx to keep state from call to call. Seed current with the
+ * original start value
*/
fctx->current = start;
fctx->finish = finish;
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for
- * this iteration
+ * get the saved state and use current as the result for this
+ * iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
-
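
Both generate_series variants in this commit (the int4 one above and the int8 one further down) are plain set-returning functions, and the reindented fragments follow the standard FuncCallContext protocol from funcapi.h. A condensed, self-contained sketch of that protocol, with a hypothetical function name and the step and error handling reduced to the essentials:

#include "postgres.h"
#include "funcapi.h"

typedef struct
{
    int32       current;
    int32       finish;
    int32       step;
} demo_series_fctx;

PG_FUNCTION_INFO_V1(demo_series);

Datum
demo_series(PG_FUNCTION_ARGS)
{
    FuncCallContext *funcctx;
    demo_series_fctx *fctx;

    if (SRF_IS_FIRSTCALL())
    {
        MemoryContext oldcontext;

        funcctx = SRF_FIRSTCALL_INIT();
        /* state must survive across calls, so allocate it in the SRF context */
        oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
        fctx = (demo_series_fctx *) palloc(sizeof(demo_series_fctx));
        fctx->current = PG_GETARG_INT32(0);
        fctx->finish = PG_GETARG_INT32(1);
        fctx->step = 1;         /* the real function also accepts a step argument */
        funcctx->user_fctx = fctx;
        MemoryContextSwitchTo(oldcontext);
    }

    funcctx = SRF_PERCALL_SETUP();
    fctx = (demo_series_fctx *) funcctx->user_fctx;

    if (fctx->current <= fctx->finish)
    {
        int32       result = fctx->current;

        fctx->current += fctx->step;
        SRF_RETURN_NEXT(funcctx, Int32GetDatum(result));
    }

    /* no more rows */
    SRF_RETURN_DONE(funcctx);
}
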
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.54 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/int8.c,v 1.55 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int64 current;
int64 finish;
int64 step;
-} generate_series_fctx;
+} generate_series_fctx;
/***********************************************************************
**
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type bigint: \"%s\"", str)));
+ errmsg("invalid input syntax for type bigint: \"%s\"", str)));
}
/* process digits */
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type bigint: \"%s\"", str)));
+ errmsg("invalid input syntax for type bigint: \"%s\"", str)));
}
*result = (sign < 0) ? -tmp : tmp;
Datum
generate_series_step_int8(PG_FUNCTION_ARGS)
{
- FuncCallContext *funcctx;
- generate_series_fctx *fctx;
- int64 result;
- MemoryContext oldcontext;
+ FuncCallContext *funcctx;
+ generate_series_fctx *fctx;
+ int64 result;
+ MemoryContext oldcontext;
/* stuff done only on the first call of the function */
if (SRF_IS_FIRSTCALL())
{
- int64 start = PG_GETARG_INT64(0);
- int64 finish = PG_GETARG_INT64(1);
- int64 step = 1;
+ int64 start = PG_GETARG_INT64(0);
+ int64 finish = PG_GETARG_INT64(1);
+ int64 step = 1;
/* see if we were given an explicit step size */
if (PG_NARGS() == 3)
fctx = (generate_series_fctx *) palloc(sizeof(generate_series_fctx));
/*
- * Use fctx to keep state from call to call.
- * Seed current with the original start value
+ * Use fctx to keep state from call to call. Seed current with the
+ * original start value
*/
fctx->current = start;
fctx->finish = finish;
funcctx = SRF_PERCALL_SETUP();
/*
- * get the saved state and use current as the result for
- * this iteration
+ * get the saved state and use current as the result for this
+ * iteration
*/
fctx = funcctx->user_fctx;
result = fctx->current;
/* do when there is no more left */
SRF_RETURN_DONE(funcctx);
}
-
/*
* PostgreSQL type definitions for MAC addresses.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.33 2003/11/29 19:51:58 pgsql Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/mac.c,v 1.34 2004/08/29 05:06:49 momjian Exp $
*/
#include "postgres.h"
if (count != 6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
+ errmsg("invalid input syntax for type macaddr: \"%s\"", str)));
if ((a < 0) || (a > 255) || (b < 0) || (b > 255) ||
(c < 0) || (c > 255) || (d < 0) || (d > 255) ||
(e < 0) || (e > 255) || (f < 0) || (f > 255))
ereport(ERROR,
(errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
- errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
+ errmsg("invalid octet value in \"macaddr\" value: \"%s\"", str)));
result = (macaddr *) palloc(sizeof(macaddr));
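
The range checks above come right after the sscanf() pass that macaddr_in runs over the text form; count is the number of fields matched and a through f are the parsed octets. Roughly, for the colon-separated spelling only (the real function tries several alternative formats):

#include <stdio.h>

/*
 * Sketch of the parse step that precedes the checks above: pull six
 * hex octets out of the colon-separated spelling of a MAC address.
 */
static int
parse_mac(const char *str, unsigned char out[6])
{
    unsigned int a, b, c, d, e, f;
    int         count;

    count = sscanf(str, "%x:%x:%x:%x:%x:%x", &a, &b, &c, &d, &e, &f);
    if (count != 6)
        return -1;              /* wrong number of fields */
    if (a > 255 || b > 255 || c > 255 || d > 255 || e > 255 || f > 255)
        return -1;              /* octet value out of range */
    out[0] = a; out[1] = b; out[2] = c;
    out[3] = d; out[4] = e; out[5] = f;
    return 0;
}
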
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.37 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/misc.c,v 1.38 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Functions to send signals to other backends.
*/
-static int pg_signal_backend(int pid, int sig)
+static int
+pg_signal_backend(int pid, int sig)
{
- if (!superuser())
+ if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- (errmsg("must be superuser to signal other server processes"))));
-
+ (errmsg("must be superuser to signal other server processes"))));
+
if (!IsBackendPid(pid))
{
/*
- * This is just a warning so a loop-through-resultset will not abort
- * if one backend terminated on its own during the run
+ * This is just a warning so a loop-through-resultset will not
+ * abort if one backend terminated on its own during the run
*/
ereport(WARNING,
- (errmsg("PID %d is not a PostgreSQL server process", pid)));
+ (errmsg("PID %d is not a PostgreSQL server process", pid)));
return 0;
}
- if (kill(pid, sig))
+ if (kill(pid, sig))
{
/* Again, just a warning to allow loops */
ereport(WARNING,
- (errmsg("could not send signal to process %d: %m",pid)));
+ (errmsg("could not send signal to process %d: %m", pid)));
return 0;
}
return 1;
Datum
pg_cancel_backend(PG_FUNCTION_ARGS)
{
- PG_RETURN_INT32(pg_signal_backend(PG_GETARG_INT32(0),SIGINT));
+ PG_RETURN_INT32(pg_signal_backend(PG_GETARG_INT32(0), SIGINT));
}
#ifdef NOT_USED
Datum
pg_terminate_backend(PG_FUNCTION_ARGS)
{
- PG_RETURN_INT32(pg_signal_backend(PG_GETARG_INT32(0),SIGTERM));
+ PG_RETURN_INT32(pg_signal_backend(PG_GETARG_INT32(0), SIGTERM));
}
-
#endif
/* Function to find out which databases make use of a tablespace */
-typedef struct
+typedef struct
{
- char *location;
- DIR *dirdesc;
+ char *location;
+ DIR *dirdesc;
} ts_db_fctx;
-Datum pg_tablespace_databases(PG_FUNCTION_ARGS)
+Datum
+pg_tablespace_databases(PG_FUNCTION_ARGS)
{
FuncCallContext *funcctx;
struct dirent *de;
if (SRF_IS_FIRSTCALL())
{
MemoryContext oldcontext;
- Oid tablespaceOid=PG_GETARG_OID(0);
+ Oid tablespaceOid = PG_GETARG_OID(0);
- funcctx=SRF_FIRSTCALL_INIT();
+ funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
fctx = palloc(sizeof(ts_db_fctx));
/*
- * size = path length + tablespace dirname length
- * + 2 dir sep chars + oid + terminator
+ * size = path length + tablespace dirname length + 2 dir sep
+ * chars + oid + terminator
*/
- fctx->location = (char*) palloc(strlen(DataDir) + 11 + 10 + 1);
+ fctx->location = (char *) palloc(strlen(DataDir) + 11 + 10 + 1);
if (tablespaceOid == GLOBALTABLESPACE_OID)
{
fctx->dirdesc = NULL;
sprintf(fctx->location, "%s/base", DataDir);
else
sprintf(fctx->location, "%s/pg_tblspc/%u", DataDir,
- tablespaceOid);
-
+ tablespaceOid);
+
fctx->dirdesc = AllocateDir(fctx->location);
if (!fctx->dirdesc)
errmsg("could not open directory \"%s\": %m",
fctx->location)));
ereport(WARNING,
- (errmsg("%u is not a tablespace oid", tablespaceOid)));
+ (errmsg("%u is not a tablespace oid", tablespaceOid)));
}
}
funcctx->user_fctx = fctx;
MemoryContextSwitchTo(oldcontext);
}
- funcctx=SRF_PERCALL_SETUP();
- fctx = (ts_db_fctx*) funcctx->user_fctx;
+ funcctx = SRF_PERCALL_SETUP();
+ fctx = (ts_db_fctx *) funcctx->user_fctx;
- if (!fctx->dirdesc) /* not a tablespace */
+ if (!fctx->dirdesc) /* not a tablespace */
SRF_RETURN_DONE(funcctx);
while ((de = readdir(fctx->dirdesc)) != NULL)
{
- char *subdir;
- DIR *dirdesc;
- Oid datOid = atooid(de->d_name);
+ char *subdir;
+ DIR *dirdesc;
+
+ Oid datOid = atooid(de->d_name);
/* this test skips . and .., but is awfully weak */
if (!datOid)
continue;
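
pg_tablespace_databases treats every entry of the per-tablespace directory whose name parses as a nonzero OID as a database, which is why the test above skips "." and ".." only as a side effect. The same scan in portable C, with AllocateDir and the backend's path construction replaced by the plain dirent API and an illustrative path argument:

#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch: list the numeric (OID-named) entries of a tablespace
 * directory.  The path is supplied by the caller; the backend builds
 * it from DataDir and the tablespace OID.
 */
static void
list_database_oids(const char *location)
{
    DIR        *dirdesc = opendir(location);
    struct dirent *de;

    if (dirdesc == NULL)
    {
        perror(location);
        return;
    }
    while ((de = readdir(dirdesc)) != NULL)
    {
        unsigned long datOid = strtoul(de->d_name, NULL, 10);

        /* skips "." and "..", but also any other non-numeric entry */
        if (datOid == 0)
            continue;
        printf("%lu\n", datOid);
    }
    closedir(dirdesc);
}
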
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.124 2004/08/29 04:12:51 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/nabstime.c,v 1.125 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
abstime2tm(AbsoluteTime _time, int *tzp, struct pg_tm * tm, char **tzn)
{
pg_time_t time = (pg_time_t) _time;
- struct pg_tm *tx;
+ struct pg_tm *tx;
/*
* If HasCTZSet is true then we have a brute force time zone
AbsoluteTime result;
fsec_t fsec;
int tz = 0;
- struct pg_tm date,
+ struct pg_tm date,
*tm = &date;
int dterr;
char *field[MAXDATEFIELDS];
char *result;
int tz;
double fsec = 0;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
char zone[MAXDATELEN + 1],
abstime_cmp_internal(AbsoluteTime a, AbsoluteTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any non-INVALID.
- * This is somewhat arbitrary; the important thing is to have a
- * consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any
+ * non-INVALID. This is somewhat arbitrary; the important thing is to
+ * have a consistent sort order.
*/
if (a == INVALID_ABSTIME)
{
AbsoluteTime result;
fsec_t fsec;
int tz;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_IS_NOBEGIN(timestamp))
{
AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
Timestamp result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
char zone[MAXDATELEN + 1],
TimestampTz timestamp = PG_GETARG_TIMESTAMP(0);
AbsoluteTime result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_IS_NOBEGIN(timestamp))
{
AbsoluteTime abstime = PG_GETARG_ABSOLUTETIME(0);
TimestampTz result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
char zone[MAXDATELEN + 1],
{
char *str = PG_GETARG_CSTRING(0);
RelativeTime result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int dtype;
{
RelativeTime time = PG_GETARG_RELATIVETIME(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
char buf[MAXDATELEN + 1];
interval->status == T_INTERVAL_VALID))
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid status in external \"tinterval\" value")));
+ errmsg("invalid status in external \"tinterval\" value")));
interval->data[0] = pq_getmsgint(buf, sizeof(interval->data[0]));
interval->data[1] = pq_getmsgint(buf, sizeof(interval->data[1]));
reltime_cmp_internal(RelativeTime a, RelativeTime b)
{
/*
- * We consider all INVALIDs to be equal and larger than any non-INVALID.
- * This is somewhat arbitrary; the important thing is to have a
- * consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any
+ * non-INVALID. This is somewhat arbitrary; the important thing is to
+ * have a consistent sort order.
*/
if (a == INVALID_RELTIME)
{
* tinterval comparison routines
*
* Note: comparison is based on the lengths of the intervals, not on
- * endpoint value. This is pretty bogus, but since it's only a legacy
+ * endpoint value. This is pretty bogus, but since it's only a legacy
* datatype I'm not going to propose changing it.
*/
static int
AbsoluteTime b_len;
/*
- * We consider all INVALIDs to be equal and larger than any non-INVALID.
- * This is somewhat arbitrary; the important thing is to have a
- * consistent sort order.
+ * We consider all INVALIDs to be equal and larger than any
+ * non-INVALID. This is somewhat arbitrary; the important thing is to
+ * have a consistent sort order.
*/
a_invalid = ((a->status == T_INTERVAL_INVAL) ||
(a->data[0] == INVALID_ABSTIME) ||
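
The comparator comments repeated above for abstime, reltime and tinterval all describe one convention: INVALID values compare equal to each other and greater than every valid value, so sorts stay consistent. In miniature, with an illustrative sentinel:

/*
 * Sketch of the "INVALIDs sort last" rule the comparators above follow.
 * DEMO_INVALID stands in for INVALID_ABSTIME / INVALID_RELTIME.
 */
#define DEMO_INVALID 0x7FFFFFFF

static int
demo_cmp(int a, int b)
{
    if (a == DEMO_INVALID)
        return (b == DEMO_INVALID) ? 0 : 1;
    if (b == DEMO_INVALID)
        return -1;
    return (a < b) ? -1 : (a > b) ? 1 : 0;
}
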
/*
* PostgreSQL type definitions for the INET and CIDR types.
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.52 2004/06/13 21:57:25 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/network.c,v 1.53 2004/08/29 05:06:49 momjian Exp $
*
* Jon Postel RIP 16 Oct 1998
*/
ip_family(addr) != PGSQL_AF_INET6)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid address family in external \"inet\" value")));
+ errmsg("invalid address family in external \"inet\" value")));
bits = pq_getmsgbyte(buf);
if (bits < 0 || bits > ip_maxbits(addr))
ereport(ERROR,
Datum
inet_client_addr(PG_FUNCTION_ARGS)
{
- Port *port = MyProcPort;
- char remote_host[NI_MAXHOST];
- int ret;
+ Port *port = MyProcPort;
+ char remote_host[NI_MAXHOST];
+ int ret;
if (port == NULL)
PG_RETURN_NULL();
- switch (port->raddr.addr.ss_family) {
- case AF_INET:
+ switch (port->raddr.addr.ss_family)
+ {
+ case AF_INET:
#ifdef HAVE_IPV6
- case AF_INET6:
+ case AF_INET6:
#endif
- break;
- default:
- PG_RETURN_NULL();
+ break;
+ default:
+ PG_RETURN_NULL();
}
remote_host[0] = '\0';
ret = getnameinfo_all(&port->raddr.addr, port->raddr.salen,
- remote_host, sizeof(remote_host),
- NULL, 0,
- NI_NUMERICHOST | NI_NUMERICSERV);
+ remote_host, sizeof(remote_host),
+ NULL, 0,
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
PG_RETURN_NULL();
Datum
inet_client_port(PG_FUNCTION_ARGS)
{
- Port *port = MyProcPort;
- char remote_port[NI_MAXSERV];
- int ret;
+ Port *port = MyProcPort;
+ char remote_port[NI_MAXSERV];
+ int ret;
if (port == NULL)
PG_RETURN_NULL();
- switch (port->raddr.addr.ss_family) {
- case AF_INET:
+ switch (port->raddr.addr.ss_family)
+ {
+ case AF_INET:
#ifdef HAVE_IPV6
- case AF_INET6:
+ case AF_INET6:
#endif
- break;
- default:
- PG_RETURN_NULL();
+ break;
+ default:
+ PG_RETURN_NULL();
}
remote_port[0] = '\0';
ret = getnameinfo_all(&port->raddr.addr, port->raddr.salen,
- NULL, 0,
- remote_port, sizeof(remote_port),
- NI_NUMERICHOST | NI_NUMERICSERV);
+ NULL, 0,
+ remote_port, sizeof(remote_port),
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
PG_RETURN_NULL();
Datum
inet_server_addr(PG_FUNCTION_ARGS)
{
- Port *port = MyProcPort;
- char local_host[NI_MAXHOST];
- int ret;
+ Port *port = MyProcPort;
+ char local_host[NI_MAXHOST];
+ int ret;
if (port == NULL)
PG_RETURN_NULL();
- switch (port->laddr.addr.ss_family) {
- case AF_INET:
+ switch (port->laddr.addr.ss_family)
+ {
+ case AF_INET:
#ifdef HAVE_IPV6
- case AF_INET6:
+ case AF_INET6:
#endif
- break;
- default:
- PG_RETURN_NULL();
+ break;
+ default:
+ PG_RETURN_NULL();
}
local_host[0] = '\0';
ret = getnameinfo_all(&port->laddr.addr, port->laddr.salen,
- local_host, sizeof(local_host),
- NULL, 0,
- NI_NUMERICHOST | NI_NUMERICSERV);
+ local_host, sizeof(local_host),
+ NULL, 0,
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
PG_RETURN_NULL();
Datum
inet_server_port(PG_FUNCTION_ARGS)
{
- Port *port = MyProcPort;
- char local_port[NI_MAXSERV];
- int ret;
+ Port *port = MyProcPort;
+ char local_port[NI_MAXSERV];
+ int ret;
if (port == NULL)
PG_RETURN_NULL();
- switch (port->laddr.addr.ss_family) {
- case AF_INET:
+ switch (port->laddr.addr.ss_family)
+ {
+ case AF_INET:
#ifdef HAVE_IPV6
- case AF_INET6:
+ case AF_INET6:
#endif
- break;
- default:
- PG_RETURN_NULL();
+ break;
+ default:
+ PG_RETURN_NULL();
}
local_port[0] = '\0';
ret = getnameinfo_all(&port->laddr.addr, port->laddr.salen,
- NULL, 0,
- local_port, sizeof(local_port),
- NI_NUMERICHOST | NI_NUMERICSERV);
+ NULL, 0,
+ local_port, sizeof(local_port),
+ NI_NUMERICHOST | NI_NUMERICSERV);
if (ret)
PG_RETURN_NULL();
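
All four functions in this hunk (inet_client_addr, inet_client_port, inet_server_addr, inet_server_port) share the same shape: return NULL for address families other than AF_INET and AF_INET6, then ask getnameinfo_all for the numeric form. The core of that lookup with the standard getnameinfo(), as a sketch:

#include <sys/socket.h>
#include <netdb.h>

/*
 * Sketch: format a socket address as a numeric host string, the same
 * idea the functions above implement through the backend's
 * getnameinfo_all() wrapper.
 */
static int
numeric_host(const struct sockaddr *sa, socklen_t salen,
             char *host, socklen_t hostlen)
{
    switch (sa->sa_family)
    {
        case AF_INET:
#ifdef AF_INET6
        case AF_INET6:
#endif
            break;
        default:
            return -1;          /* unsupported address family */
    }

    host[0] = '\0';
    return getnameinfo(sa, salen, host, hostlen, NULL, 0,
                       NI_NUMERICHOST | NI_NUMERICSERV);
}
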
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/not_in.c,v 1.40 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/not_in.c,v 1.41 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
errmsg("invalid name syntax"),
- errhint("Must provide \"relationname.columnname\".")));
+ errhint("Must provide \"relationname.columnname\".")));
attribute = strVal(llast(names));
names = list_truncate(names, nnames - 1);
relrv = makeRangeVarFromNameList(names);
* Copyright (c) 1998-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.77 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numeric.c,v 1.78 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void trunc_var(NumericVar *var, int rscale);
static void strip_var(NumericVar *var);
static void compute_bucket(Numeric operand, Numeric bound1, Numeric bound2,
- NumericVar *count_var, NumericVar *result_var);
+ NumericVar *count_var, NumericVar *result_var);
/* ----------------------------------------------------------------------
if (d < 0 || d >= NBASE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("invalid digit in external \"numeric\" value")));
+ errmsg("invalid digit in external \"numeric\" value")));
value.digits[i] = d;
}
if (count <= 0)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("count must be greater than zero")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("count must be greater than zero")));
init_var(&result_var);
init_var(&count_var);
{
case 0:
ereport(ERROR,
- (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
- errmsg("lower bound cannot equal upper bound")));
+ (errcode(ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION),
+ errmsg("lower bound cannot equal upper bound")));
- /* bound1 < bound2 */
+ /* bound1 < bound2 */
case -1:
if (cmp_numerics(operand, bound1) < 0)
set_var_from_var(&const_zero, &result_var);
&count_var, &result_var);
break;
- /* bound1 > bound2 */
+ /* bound1 > bound2 */
case 1:
if (cmp_numerics(operand, bound1) > 0)
set_var_from_var(&const_zero, &result_var);
compute_bucket(Numeric operand, Numeric bound1, Numeric bound2,
NumericVar *count_var, NumericVar *result_var)
{
- NumericVar bound1_var;
- NumericVar bound2_var;
- NumericVar operand_var;
+ NumericVar bound1_var;
+ NumericVar bound2_var;
+ NumericVar operand_var;
init_var(&bound1_var);
init_var(&bound2_var);
free_var(&bound1_var);
free_var(&bound2_var);
free_var(&operand_var);
-}
+}
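
compute_bucket, whose locals are reindented above, implements the SQL width_bucket() rule: operands below the lower bound fall into bucket 0, operands at or beyond the upper bound into bucket count+1, and everything else into one of count equal-width buckets. The ascending-bounds case in plain doubles, for orientation:

#include <math.h>

/*
 * Sketch of width_bucket() restricted to bound1 < bound2 and using
 * doubles rather than NumericVar arithmetic.  The real code also
 * handles descending bounds and raises errors instead of returning -1.
 */
static int
width_bucket_demo(double operand, double bound1, double bound2, int count)
{
    if (count <= 0 || bound1 >= bound2)
        return -1;
    if (operand < bound1)
        return 0;               /* below the histogram range */
    if (operand >= bound2)
        return count + 1;       /* above the histogram range */
    return (int) floor((operand - bound1) / (bound2 - bound1) * count) + 1;
}
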
/* ----------------------------------------------------------------------
*
trunc_var(&arg2_trunc, 0);
/*
- * Return special SQLSTATE error codes for a few conditions
- * mandated by the standard.
+ * Return special SQLSTATE error codes for a few conditions mandated
+ * by the standard.
*/
if ((cmp_var(&arg1, &const_zero) == 0 &&
cmp_var(&arg2, &const_zero) < 0) ||
static int32
numericvar_to_int4(NumericVar *var)
{
- int32 result;
- int64 val;
+ int32 result;
+ int64 val;
if (!numericvar_to_int8(var, &val))
ereport(ERROR,
if (!isdigit((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"", str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"", str)));
decdigits = (unsigned char *) palloc(strlen(cp) + DEC_DIGITS * 2);
if (have_dp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
have_dp = TRUE;
cp++;
}
if (endptr == cp)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp = endptr;
if (exponent > NUMERIC_MAX_PRECISION ||
exponent < -NUMERIC_MAX_PRECISION)
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
dweight += (int) exponent;
dscale -= (int) exponent;
if (dscale < 0)
if (!isspace((unsigned char) *cp))
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type numeric: \"%s\"",
- str)));
+ errmsg("invalid input syntax for type numeric: \"%s\"",
+ str)));
cp++;
}
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
/* shouldn't happen ... */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("invalid input syntax for type double precision: \"%s\"",
- tmp)));
+ errmsg("invalid input syntax for type double precision: \"%s\"",
+ tmp)));
}
pfree(tmp);
}
/*
- * SQL2003 defines sqrt() in terms of power, so we need to emit
- * the right SQLSTATE error code if the operand is negative.
+ * SQL2003 defines sqrt() in terms of power, so we need to emit the
+ * right SQLSTATE error code if the operand is negative.
*/
if (stat < 0)
ereport(ERROR,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.64 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/numutils.c,v 1.65 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
s)));
/*
- * Skip any trailing whitespace; if anything but whitespace
- * remains before the terminating character, bail out
+ * Skip any trailing whitespace; if anything but whitespace remains
+ * before the terminating character, bail out
*/
while (*badp != c && isspace((unsigned char) *badp))
badp++;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.58 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oid.c,v 1.59 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* In releases prior to 8.0, we accepted an empty string as valid
- * input (yielding an OID of 0). In 8.0, we accept empty strings,
- * but emit a warning noting that the feature is deprecated. In
- * 8.1+, the warning should be replaced by an error.
+ * input (yielding an OID of 0). In 8.0, we accept empty strings, but
+ * emit a warning noting that the feature is deprecated. In 8.1+, the
+ * warning should be replaced by an error.
*/
if (*s == '\0')
ereport(WARNING,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.54 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/oracle_compat.c,v 1.55 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* If the system provides the needed functions for wide-character manipulation
* (which are all standardized by C99), then we implement upper/lower/initcap
- * using wide-character functions. Otherwise we use the traditional
+ * using wide-character functions. Otherwise we use the traditional
* functions, which of course will not work as desired in multibyte character
* sets. Note that in either case we are effectively assuming that the
* database character encoding matches the encoding implied by LC_CTYPE.
{
int nbytes = VARSIZE(txt) - VARHDRSZ;
char *workstr;
- wchar_t *result;
+ wchar_t *result;
size_t ncodes;
/* Overflow paranoia */
if (ncodes == (size_t) -1)
{
/*
- * Invalid multibyte character encountered. We try to give a useful
- * error message by letting pg_verifymbstr check the string. But
- * it's possible that the string is OK to us, and not OK to mbstowcs
- * --- this suggests that the LC_CTYPE locale is different from the
- * database encoding. Give a generic error message if verifymbstr
- * can't find anything wrong.
+ * Invalid multibyte character encountered. We try to give a
+ * useful error message by letting pg_verifymbstr check the
+ * string. But it's possible that the string is OK to us, and not
+ * OK to mbstowcs --- this suggests that the LC_CTYPE locale is
+ * different from the database encoding. Give a generic error
+ * message if verifymbstr can't find anything wrong.
*/
pg_verifymbstr(workstr, nbytes, false);
ereport(ERROR,
return result;
}
-
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
/********************************************************************
{
text *string = PG_GETARG_TEXT_P(0);
text *result;
- wchar_t *workspace;
+ wchar_t *workspace;
int i;
workspace = texttowcs(string);
PG_RETURN_TEXT_P(result);
}
else
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
{
text *string = PG_GETARG_TEXT_P_COPY(0);
char *ptr;
int m;
- /* Since we copied the string, we can scribble directly on the value */
+ /*
+ * Since we copied the string, we can scribble directly on the
+ * value
+ */
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
{
text *string = PG_GETARG_TEXT_P(0);
text *result;
- wchar_t *workspace;
+ wchar_t *workspace;
int i;
workspace = texttowcs(string);
PG_RETURN_TEXT_P(result);
}
else
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
{
text *string = PG_GETARG_TEXT_P_COPY(0);
char *ptr;
int m;
- /* Since we copied the string, we can scribble directly on the value */
+ /*
+ * Since we copied the string, we can scribble directly on the
+ * value
+ */
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
{
text *string = PG_GETARG_TEXT_P(0);
text *result;
- wchar_t *workspace;
+ wchar_t *workspace;
int wasalnum = 0;
int i;
PG_RETURN_TEXT_P(result);
}
else
-#endif /* USE_WIDE_UPPER_LOWER */
+#endif /* USE_WIDE_UPPER_LOWER */
{
text *string = PG_GETARG_TEXT_P_COPY(0);
int wasalnum = 0;
char *ptr;
int m;
- /* Since we copied the string, we can scribble directly on the value */
+ /*
+ * Since we copied the string, we can scribble directly on the
+ * value
+ */
ptr = VARDATA(string);
m = VARSIZE(string) - VARHDRSZ;
********************************************************************/
Datum
-chr(PG_FUNCTION_ARGS)
+chr (PG_FUNCTION_ARGS)
{
int32 cvalue = PG_GETARG_INT32(0);
text *result;
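
The upper, lower and initcap fragments above take the wide-character route when the C99 functions are available: convert the multibyte text to wchar_t with texttowcs, apply towupper or towlower per code, and convert back. Outside the backend the same round trip looks roughly like this, and it carries the same caveat that LC_CTYPE must match the string's encoding:

#include <locale.h>
#include <stdlib.h>
#include <wchar.h>
#include <wctype.h>

/*
 * Sketch of the wide-character upper-casing path: multibyte -> wchar_t,
 * towupper() each code, then back to multibyte.  Uses malloc instead of
 * palloc and the environment's LC_CTYPE.
 */
static char *
upper_demo(const char *src)
{
    size_t      ncodes;
    wchar_t    *wide;
    char       *out;
    size_t      i;

    setlocale(LC_CTYPE, "");            /* the backend relies on LC_CTYPE instead */
    ncodes = mbstowcs(NULL, src, 0);
    if (ncodes == (size_t) -1)
        return NULL;                    /* invalid multibyte input */
    wide = malloc((ncodes + 1) * sizeof(wchar_t));
    if (wide == NULL)
        return NULL;
    mbstowcs(wide, src, ncodes + 1);
    for (i = 0; i < ncodes; i++)
        wide[i] = towupper(wide[i]);
    out = malloc(MB_CUR_MAX * ncodes + 1);
    if (out == NULL)
    {
        free(wide);
        return NULL;
    }
    wcstombs(out, wide, MB_CUR_MAX * ncodes + 1);
    free(wide);
    return out;
}
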
*
* Portions Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.27 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/pg_locale.c,v 1.28 2004/08/29 05:06:49 momjian Exp $
*
*-----------------------------------------------------------------------
*/
if (!setlocale(LC_MESSAGES, value))
{
#ifdef WIN32
+
/*
- * Win32 returns NULL when you set LC_MESSAGES to "". So don't
- * complain unless we're trying to set it to something else.
+ * Win32 returns NULL when you set LC_MESSAGES to "". So
+ * don't complain unless we're trying to set it to something
+ * else.
*/
if (value[0])
return NULL;
}
else
value = locale_xxx_assign(LC_MESSAGES, value, false, source);
-#endif /* LC_MESSAGES */
+#endif /* LC_MESSAGES */
return value;
}
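
The LC_MESSAGES assign hook above relies on setlocale()'s return value to validate a setting: NULL means the platform rejected it, except that Win32 also returns NULL for the empty string. Stripped to its core:

#include <locale.h>

/*
 * Sketch: attempt to switch a locale category and report whether the
 * platform accepted the value.  A successful call really does change
 * the locale, just as the assign hook does.
 */
static int
try_set_locale(int category, const char *value)
{
    return setlocale(category, value) != NULL;
}
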
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.90 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/regproc.c,v 1.91 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
else if (clist->next != NULL)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
if (clist == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("function \"%s\" does not exist", pro_name_or_oid)));
+ errmsg("function \"%s\" does not exist", pro_name_or_oid)));
result = clist->oid;
if (matches == 0)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("operator does not exist: %s", opr_name_or_oid)));
+ errmsg("operator does not exist: %s", opr_name_or_oid)));
else if (matches > 1)
ereport(ERROR,
(errcode(ERRCODE_AMBIGUOUS_FUNCTION),
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_TABLE),
- errmsg("relation \"%s\" does not exist", class_name_or_oid)));
+ errmsg("relation \"%s\" does not exist", class_name_or_oid)));
/* We assume there can be only one match */
else
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("type \"%s\" does not exist", typ_name_or_oid)));
+ errmsg("type \"%s\" does not exist", typ_name_or_oid)));
/* We assume there can be only one match */
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.70 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ri_triggers.c,v 1.71 2004/08/29 05:06:49 momjian Exp $
*
* ----------
*/
if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
{
if (HeapTupleHeaderGetXmin(old_row->t_data) !=
- GetCurrentTransactionId() &&
- ri_KeysEqual(fk_rel, old_row, new_row, &qkey,
+ GetCurrentTransactionId() &&
+ ri_KeysEqual(fk_rel, old_row, new_row, &qkey,
RI_KEYPAIR_FK_IDX))
{
heap_close(pk_rel, RowShareLock);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_SELECT,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_SELECT,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_DELETE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, new_row,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_SELECT,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_SELECT,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
ri_PerformCheck(&qkey, qplan,
fk_rel, pk_rel,
old_row, NULL,
- true, /* must detect new rows */
+ true, /* must detect new rows */
SPI_OK_UPDATE,
tgargs[RI_CONSTRAINT_NAME_ARGNO]);
(tgnargs % 2) != 0)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" called with wrong number of trigger arguments",
- "RI_FKey_keyequal_upd")));
+ errmsg("function \"%s\" called with wrong number of trigger arguments",
+ "RI_FKey_keyequal_upd")));
/*
* Nothing to do if no column names to compare given
if (!OidIsValid(trigdata->tg_trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigdata->tg_trigger->tgname,
- RelationGetRelationName(trigdata->tg_relation)),
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigdata->tg_trigger->tgname,
+ RelationGetRelationName(trigdata->tg_relation)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
fk_rel = heap_open(trigdata->tg_trigger->tgconstrrelid, AccessShareLock);
{
const char *constrname = fkconstraint->constr_name;
char querystr[MAX_QUOTED_REL_NAME_LEN * 2 + 250 +
- (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4)+1)];
+ (MAX_QUOTED_NAME_LEN + 32) * ((RI_MAX_NUMKEYS * 4) + 1)];
char pkrelname[MAX_QUOTED_REL_NAME_LEN];
char relname[MAX_QUOTED_REL_NAME_LEN];
char attname[MAX_QUOTED_NAME_LEN];
char fkattname[MAX_QUOTED_NAME_LEN];
const char *sep;
- ListCell *l;
- ListCell *l2;
+ ListCell *l;
+ ListCell *l2;
int old_work_mem;
char workmembuf[32];
int spi_result;
- void *qplan;
+ void *qplan;
/*
* Check to make sure current user has enough permissions to do the
- * test query. (If not, caller can fall back to the trigger method,
+ * test query. (If not, caller can fall back to the trigger method,
* which works because it changes user IDs on the fly.)
*
* XXX are there any other show-stopper conditions to check?
*/
if (pg_class_aclcheck(RelationGetRelid(rel), GetUserId(), ACL_SELECT) != ACLCHECK_OK)
return false;
- if (pg_class_aclcheck(RelationGetRelid(pkrel), GetUserId(), ACL_SELECT) != ACLCHECK_OK)
+ if (pg_class_aclcheck(RelationGetRelid(pkrel), GetUserId(), ACL_SELECT) != ACLCHECK_OK)
return false;
/*----------
* The query string built is:
- * SELECT fk.keycols FROM ONLY relname fk
- * LEFT OUTER JOIN ONLY pkrelname pk
- * ON (pk.pkkeycol1=fk.keycol1 [AND ...])
- * WHERE pk.pkkeycol1 IS NULL AND
+ * SELECT fk.keycols FROM ONLY relname fk
+ * LEFT OUTER JOIN ONLY pkrelname pk
+ * ON (pk.pkkeycol1=fk.keycol1 [AND ...])
+ * WHERE pk.pkkeycol1 IS NULL AND
* For MATCH unspecified:
- * (fk.keycol1 IS NOT NULL [AND ...])
+ * (fk.keycol1 IS NOT NULL [AND ...])
* For MATCH FULL:
- * (fk.keycol1 IS NOT NULL [OR ...])
+ * (fk.keycol1 IS NOT NULL [OR ...])
*----------
*/
sprintf(querystr, "SELECT ");
- sep="";
+ sep = "";
foreach(l, fkconstraint->fk_attrs)
{
quoteOneName(attname, strVal(lfirst(l)));
" FROM ONLY %s fk LEFT OUTER JOIN ONLY %s pk ON (",
relname, pkrelname);
- sep="";
+ sep = "";
forboth(l, fkconstraint->pk_attrs, l2, fkconstraint->fk_attrs)
{
quoteOneName(attname, strVal(lfirst(l)));
sep, attname, fkattname);
sep = " AND ";
}
+
/*
* It's sufficient to test any one pk attribute for null to detect a
* join failure.
snprintf(querystr + strlen(querystr), sizeof(querystr) - strlen(querystr),
") WHERE pk.%s IS NULL AND (", attname);
- sep="";
+ sep = "";
foreach(l, fkconstraint->fk_attrs)
{
quoteOneName(attname, strVal(lfirst(l)));
switch (fkconstraint->fk_matchtype)
{
case FKCONSTR_MATCH_UNSPECIFIED:
- sep=" AND ";
+ sep = " AND ";
break;
case FKCONSTR_MATCH_FULL:
- sep=" OR ";
+ sep = " OR ";
break;
case FKCONSTR_MATCH_PARTIAL:
ereport(ERROR,
")");
/*
- * Temporarily increase work_mem so that the check query can be executed
- * more efficiently. It seems okay to do this because the query is simple
- * enough to not use a multiple of work_mem, and one typically would not
- * have many large foreign-key validations happening concurrently. So
- * this seems to meet the criteria for being considered a "maintenance"
- * operation, and accordingly we use maintenance_work_mem.
+ * Temporarily increase work_mem so that the check query can be
+ * executed more efficiently. It seems okay to do this because the
+ * query is simple enough to not use a multiple of work_mem, and one
+ * typically would not have many large foreign-key validations
+ * happening concurrently. So this seems to meet the criteria for
+ * being considered a "maintenance" operation, and accordingly we use
+ * maintenance_work_mem.
*
* We do the equivalent of "SET LOCAL work_mem" so that transaction abort
* will restore the old value if we lose control due to an error.
/*
* Generate the plan. We don't need to cache it, and there are no
- * arguments to the plan.
+ * arguments to the plan.
*/
qplan = SPI_prepare(querystr, 0, NULL);
/*
* Run the plan. For safety we force a current query snapshot to be
- * used. (In serializable mode, this arguably violates serializability,
- * but we really haven't got much choice.) We need at most one tuple
- * returned, so pass limit = 1.
+ * used. (In serializable mode, this arguably violates
+ * serializability, but we really haven't got much choice.) We need
+ * at most one tuple returned, so pass limit = 1.
*/
spi_result = SPI_execp_current(qplan, NULL, NULL, true, 1);
TupleDesc tupdesc = SPI_tuptable->tupdesc;
int nkeys = list_length(fkconstraint->fk_attrs);
int i;
- RI_QueryKey qkey;
+ RI_QueryKey qkey;
/*
* If it's MATCH FULL, and there are any nulls in the FK keys,
- * complain about that rather than the lack of a match. MATCH FULL
- * disallows partially-null FK rows.
+ * complain about that rather than the lack of a match. MATCH
+ * FULL disallows partially-null FK rows.
*/
if (fkconstraint->fk_matchtype == FKCONSTR_MATCH_FULL)
{
- bool isnull = false;
+ bool isnull = false;
for (i = 1; i <= nkeys; i++)
{
elog(ERROR, "SPI_finish failed");
/*
- * Restore work_mem for the remainder of the current transaction.
- * This is another SET LOCAL, so it won't affect the session value,
- * nor any tentative value if there is one.
+ * Restore work_mem for the remainder of the current transaction. This
+ * is another SET LOCAL, so it won't affect the session value, nor any
+ * tentative value if there is one.
*/
snprintf(workmembuf, sizeof(workmembuf), "%d", old_work_mem);
(void) set_config_option("work_mem", workmembuf,
if (!CALLED_AS_TRIGGER(fcinfo))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" was not called by trigger manager", funcname)));
+ errmsg("function \"%s\" was not called by trigger manager", funcname)));
/*
* Check proper event
!TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
+ errmsg("function \"%s\" must be fired AFTER ROW", funcname)));
switch (tgkind)
{
(tgnargs % 2) != 0)
ereport(ERROR,
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
- errmsg("function \"%s\" called with wrong number of trigger arguments",
- funcname)));
+ errmsg("function \"%s\" called with wrong number of trigger arguments",
+ funcname)));
/*
* Check that tgconstrrelid is known. We need to check here because
if (!OidIsValid(trigdata->tg_trigger->tgconstrrelid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("no target table given for trigger \"%s\" on table \"%s\"",
- trigdata->tg_trigger->tgname,
- RelationGetRelationName(trigdata->tg_relation)),
+ errmsg("no target table given for trigger \"%s\" on table \"%s\"",
+ trigdata->tg_trigger->tgname,
+ RelationGetRelationName(trigdata->tg_relation)),
errhint("Remove this referential integrity trigger and its mates, then do ALTER TABLE ADD CONSTRAINT.")));
}
/*
* In READ COMMITTED mode, we just need to make sure the regular query
* snapshot is up-to-date, and we will see all rows that could be
- * interesting. In SERIALIZABLE mode, we can't update the regular query
- * snapshot. If the caller passes detectNewRows == false then it's okay
- * to do the query with the transaction snapshot; otherwise we tell the
- * executor to force a current snapshot (and error out if it finds any
- * rows under current snapshot that wouldn't be visible per the
- * transaction snapshot).
+ * interesting. In SERIALIZABLE mode, we can't update the regular
+ * query snapshot. If the caller passes detectNewRows == false then
+ * it's okay to do the query with the transaction snapshot; otherwise
+ * we tell the executor to force a current snapshot (and error out if
+ * it finds any rows under current snapshot that wouldn't be visible
+ * per the transaction snapshot).
*/
if (IsXactIsoLevelSerializable)
- {
useCurrentSnapshot = detectNewRows;
- }
else
{
SetQuerySnapshot();
errhint("This is most likely due to a rule having rewritten the query.")));
/*
- * Determine which relation to complain about. If tupdesc wasn't
+ * Determine which relation to complain about. If tupdesc wasn't
* passed by caller, assume the violator tuple came from there.
*/
onfk = (qkey->constr_queryno == RI_PLAN_CHECK_LOOKUPPK);
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("insert or update on table \"%s\" violates foreign key constraint \"%s\"",
RelationGetRelationName(fk_rel), constrname),
- errdetail("Key (%s)=(%s) is not present in table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(pk_rel))));
+ errdetail("Key (%s)=(%s) is not present in table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(pk_rel))));
else
ereport(ERROR,
(errcode(ERRCODE_FOREIGN_KEY_VIOLATION),
errmsg("update or delete on \"%s\" violates foreign key constraint \"%s\" on \"%s\"",
RelationGetRelationName(pk_rel),
constrname, RelationGetRelationName(fk_rel)),
- errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
- key_names, key_values,
- RelationGetRelationName(fk_rel))));
+ errdetail("Key (%s)=(%s) is still referenced from table \"%s\".",
+ key_names, key_values,
+ RelationGetRelationName(fk_rel))));
}
/* ----------
if (!OidIsValid(typentry->eq_opr_finfo.fn_oid))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("could not identify an equality operator for type %s",
- format_type_be(typeid))));
+ errmsg("could not identify an equality operator for type %s",
+ format_type_be(typeid))));
/*
* Call the type specific '=' function
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.5 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/rowtypes.c,v 1.6 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
StringInfoData buf;
/*
- * Use the passed type unless it's RECORD; we can't support input
- * of anonymous types, mainly because there's no good way to figure
- * out which anonymous type is wanted. Note that for RECORD,
- * what we'll probably actually get is RECORD's typelem, ie, zero.
+ * Use the passed type unless it's RECORD; we can't support input of
+ * anonymous types, mainly because there's no good way to figure out
+ * which anonymous type is wanted. Note that for RECORD, what we'll
+ * probably actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
nulls = (char *) palloc(ncolumns * sizeof(char));
/*
- * Scan the string. We use "buf" to accumulate the de-quoted data
- * for each column, which is then fed to the appropriate input converter.
+ * Scan the string. We use "buf" to accumulate the de-quoted data for
+ * each column, which is then fed to the appropriate input converter.
*/
ptr = string;
/* Allow leading whitespace */
/* Skip comma that separates prior field from this one */
if (*ptr == ',')
ptr++;
- else /* *ptr must be ')' */
+ else
+/* *ptr must be ')' */
ereport(ERROR,
(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"", string),
+ errmsg("malformed record literal: \"%s\"", string),
errdetail("Too few columns.")));
}
else
{
/* Extract string for this column */
- bool inquote = false;
+ bool inquote = false;
buf.len = 0;
buf.data[0] = '\0';
while (inquote || !(*ptr == ',' || *ptr == ')'))
{
- char ch = *ptr++;
+ char ch = *ptr++;
if (ch == '\0')
ereport(ERROR,
{
if (*ptr == '\0')
ereport(ERROR,
- (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
- errmsg("malformed record literal: \"%s\"",
- string),
- errdetail("Unexpected end of input.")));
+ (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+ errmsg("malformed record literal: \"%s\"",
+ string),
+ errdetail("Unexpected end of input.")));
appendStringInfoChar(&buf, *ptr++);
}
else if (ch == '\"')
values[i] = FunctionCall3(&column_info->proc,
CStringGetDatum(buf.data),
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod));
nulls[i] = ' ';
}
{
ColumnIOData *column_info = &my_extra->columns[i];
Oid column_type = tupdesc->attrs[i]->atttypid;
- char *value;
- char *tmp;
- bool nq;
+ char *value;
+ char *tmp;
+ bool nq;
/* Ignore dropped columns in datatype */
if (tupdesc->attrs[i]->attisdropped)
*/
if (column_info->column_type != column_type)
{
- bool typIsVarlena;
+ bool typIsVarlena;
getTypeOutputInfo(column_type,
&column_info->typiofunc,
value = DatumGetCString(FunctionCall3(&column_info->proc,
values[i],
- ObjectIdGetDatum(column_info->typioparam),
- Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
+ ObjectIdGetDatum(column_info->typioparam),
+ Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
/* Detect whether we need double quotes for this value */
nq = (value[0] == '\0'); /* force quotes for empty string */
char *nulls;
/*
- * Use the passed type unless it's RECORD; we can't support input
- * of anonymous types, mainly because there's no good way to figure
- * out which anonymous type is wanted. Note that for RECORD,
- * what we'll probably actually get is RECORD's typelem, ie, zero.
+ * Use the passed type unless it's RECORD; we can't support input of
+ * anonymous types, mainly because there's no good way to figure out
+ * which anonymous type is wanted. Note that for RECORD, what we'll
+ * probably actually get is RECORD's typelem, ie, zero.
*/
if (tupType == InvalidOid || tupType == RECORDOID)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("input of anonymous composite types is not implemented")));
+ errmsg("input of anonymous composite types is not implemented")));
tupTypmod = -1; /* for all non-anonymous types */
tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
ncolumns = tupdesc->natts;
{
/*
* Rather than copying data around, we just set up a phony
- * StringInfo pointing to the correct portion of the input buffer.
- * We assume we can scribble on the input buffer so as to maintain
- * the convention that StringInfos have a trailing null.
+ * StringInfo pointing to the correct portion of the input
+ * buffer. We assume we can scribble on the input buffer so as
+ * to maintain the convention that StringInfos have a trailing
+ * null.
*/
StringInfoData item_buf;
char csave;
values[i] = FunctionCall2(&column_info->proc,
PointerGetDatum(&item_buf),
- ObjectIdGetDatum(column_info->typioparam));
+ ObjectIdGetDatum(column_info->typioparam));
nulls[i] = ' ';
if (item_buf.cursor != itemlen)
ereport(ERROR,
(errcode(ERRCODE_INVALID_BINARY_REPRESENTATION),
- errmsg("improper binary format in record column %d",
- i + 1)));
+ errmsg("improper binary format in record column %d",
+ i + 1)));
buf->data[buf->cursor] = csave;
}
*/
if (column_info->column_type != column_type)
{
- bool typIsVarlena;
+ bool typIsVarlena;
getTypeBinaryOutputInfo(column_type,
&column_info->typiofunc,
outputbytes = DatumGetByteaP(FunctionCall2(&column_info->proc,
values[i],
- ObjectIdGetDatum(column_info->typioparam)));
+ ObjectIdGetDatum(column_info->typioparam)));
/* We assume the result will not have been toasted */
pq_sendint(&buf, VARSIZE(outputbytes) - VARHDRSZ, 4);
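
The record_in scanning loop shown earlier in this file (the inquote and ch handling) de-quotes one column value at a time, honoring backslash escapes and doubled quotes inside quoted sections. A self-contained sketch of that scan, with ereport() reduced to an error return and a fixed-size buffer standing in for the StringInfo:

#include <stdio.h>

/*
 * Sketch of record_in's field scanner: walk a literal such as
 * (abc,"de,f","he""llo") and print each de-quoted column.  The real
 * code accumulates into a StringInfo and ereport()s on bad input.
 */
static int
split_record(const char *string)
{
    const char *ptr = string;
    char        buf[256];

    if (*ptr++ != '(')
        return -1;
    for (;;)
    {
        int         inquote = 0;
        size_t      len = 0;

        while (inquote || !(*ptr == ',' || *ptr == ')'))
        {
            char        ch = *ptr++;

            if (ch == '\0' || len >= sizeof(buf) - 1)
                return -1;      /* unexpected end of input, or field too long */
            if (ch == '\\')
            {
                if (*ptr == '\0')
                    return -1;
                buf[len++] = *ptr++;        /* backslash quotes the next char */
            }
            else if (ch == '"')
            {
                if (inquote && *ptr == '"')
                    buf[len++] = *ptr++;    /* doubled quote inside quotes */
                else
                    inquote = !inquote;
            }
            else
                buf[len++] = ch;
        }
        buf[len] = '\0';
        printf("column: %s\n", buf);
        if (*ptr++ == ')')
            break;              /* done; otherwise we just skipped the comma */
    }
    return 0;
}
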
* back to source text
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.178 2004/08/19 20:57:41 tgl Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/ruleutils.c,v 1.179 2004/08/29 05:06:49 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
StringInfo buf);
static char *pg_get_ruledef_worker(Oid ruleoid, int prettyFlags);
static char *pg_get_indexdef_worker(Oid indexrelid, int colno,
- int prettyFlags);
+ int prettyFlags);
static char *pg_get_constraintdef_worker(Oid constraintId, bool fullCommand,
- int prettyFlags);
+ int prettyFlags);
static char *pg_get_expr_worker(text *expr, Oid relid, char *relname,
- int prettyFlags);
-static Oid get_constraint_index(Oid constraintRelOid, Oid constraintOid);
+ int prettyFlags);
+static Oid get_constraint_index(Oid constraintRelOid, Oid constraintOid);
static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
int prettyFlags);
static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc,
static void get_from_clause_item(Node *jtnode, Query *query,
deparse_context *context);
static void get_from_clause_alias(Alias *alias, int varno,
- Query *query, deparse_context *context);
+ Query *query, deparse_context *context);
static void get_from_clause_coldeflist(List *coldeflist,
deparse_context *context);
static void get_opclass_name(Oid opclass, Oid actual_datatype,
appendStringInfoChar(&buf, ')');
/*
- * If the index is in a different tablespace from its parent,
- * tell about that
+ * If the index is in a different tablespace from its parent, tell
+ * about that
*/
if (OidIsValid(idxrelrec->reltablespace) &&
idxrelrec->reltablespace != get_rel_tablespace(indrelid))
{
- char *spcname = get_tablespace_name(idxrelrec->reltablespace);
+ char *spcname = get_tablespace_name(idxrelrec->reltablespace);
if (spcname) /* just paranoia... */
{
Oid constraintId = PG_GETARG_OID(0);
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, 0)));
+ false, 0)));
}
Datum
prettyFlags = pretty ? PRETTYFLAG_PAREN | PRETTYFLAG_INDENT : 0;
PG_RETURN_TEXT_P(string_to_text(pg_get_constraintdef_worker(constraintId,
- false, prettyFlags)));
+ false, prettyFlags)));
}
/* Internal version that returns a palloc'd C string */
constraintId);
if (OidIsValid(indexOid))
{
- Oid reltablespace;
- Oid indtablespace;
+ Oid reltablespace;
+ Oid indtablespace;
reltablespace = get_rel_tablespace(conForm->conrelid);
indtablespace = get_rel_tablespace(indexOid);
if (OidIsValid(indtablespace) &&
indtablespace != reltablespace)
{
- char *spcname = get_tablespace_name(indtablespace);
+ char *spcname = get_tablespace_name(indtablespace);
- if (spcname) /* just paranoia... */
+ if (spcname) /* just paranoia... */
{
appendStringInfo(&buf, " USING INDEX TABLESPACE %s",
quote_identifier(spcname));
prettyFlags, 0);
/*
- * Now emit the constraint definition. There are cases where
- * the constraint expression will be fully parenthesized and
- * we don't need the outer parens ... but there are other
- * cases where we do need 'em. Be conservative for now.
+ * Now emit the constraint definition. There are cases
+ * where the constraint expression will be fully
+ * parenthesized and we don't need the outer parens ...
+ * but there are other cases where we do need 'em. Be
+ * conservative for now.
*
* Note that simply checking for leading '(' and trailing ')'
- * would NOT be good enough, consider "(x > 0) AND (y > 0)".
+ * would NOT be good enough, consider "(x > 0) AND (y >
+ * 0)".
*/
appendStringInfo(&buf, "CHECK (%s)", consrc);
Datum
pg_get_serial_sequence(PG_FUNCTION_ARGS)
{
- text *tablename = PG_GETARG_TEXT_P(0);
- text *columnname = PG_GETARG_TEXT_P(1);
+ text *tablename = PG_GETARG_TEXT_P(0);
+ text *columnname = PG_GETARG_TEXT_P(1);
RangeVar *tablerv;
Oid tableOid;
- char *column;
+ char *column;
AttrNumber attnum;
- Oid sequenceId = InvalidOid;
+ Oid sequenceId = InvalidOid;
Relation depRel;
ScanKeyData key[3];
SysScanDesc scan;
/* Get the OID of the table */
tablerv = makeRangeVarFromNameList(textToQualifiedNameList(tablename,
- "pg_get_serial_sequence"));
+ "pg_get_serial_sequence"));
tableOid = RangeVarGetRelid(tablerv, false);
/* Get the number of the column */
column = DatumGetCString(DirectFunctionCall1(textout,
- PointerGetDatum(columnname)));
+ PointerGetDatum(columnname)));
attnum = get_attnum(tableOid, column);
if (attnum == InvalidAttrNumber)
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
+ Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
* We assume any internal dependency of a relation on a column
if (OidIsValid(sequenceId))
{
HeapTuple classtup;
- Form_pg_class classtuple;
- char *nspname;
- char *result;
+ Form_pg_class classtuple;
+ char *nspname;
+ char *result;
/* Get the sequence's pg_class entry */
classtup = SearchSysCache(RELOID,
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
+ Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
- * We assume any internal dependency of a relation on the constraint
- * must be what we are looking for.
+ * We assume any internal dependency of a relation on the
+ * constraint must be what we are looking for.
*/
if (deprec->classid == RelOid_pg_class &&
deprec->objsubid == 0 &&
sortcoltype = exprType(sortexpr);
/* See whether operator is default < or > for datatype */
typentry = lookup_type_cache(sortcoltype,
- TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
+ TYPECACHE_LT_OPR | TYPECACHE_GT_OPR);
if (srt->sortop == typentry->lt_opr)
- /* ASC is default, so emit nothing */ ;
+ /* ASC is default, so emit nothing */ ;
else if (srt->sortop == typentry->gt_opr)
appendStringInfo(buf, " DESC");
else
SetOperationStmt *op = (SetOperationStmt *) setOp;
/*
- * We force parens whenever nesting two SetOperationStmts.
- * There are some cases in which parens are needed around a leaf
- * query too, but those are more easily handled at the next level
- * down (see code above).
+ * We force parens whenever nesting two SetOperationStmts. There
+ * are some cases in which parens are needed around a leaf query
+ * too, but those are more easily handled at the next level down
+ * (see code above).
*/
need_paren = !IsA(op->larg, RangeTblRef);
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resdom->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resdom->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and strip
- * off the top-level nodes representing the indirection assignments.
+ * Print any indirection needed (subfields or subscripts), and
+ * strip off the top-level nodes representing the indirection
+ * assignments.
*/
strippedexprs = lappend(strippedexprs,
processIndirection((Node *) tle->expr,
sep = "";
foreach(l, strippedexprs)
{
- Node *expr = lfirst(l);
+ Node *expr = lfirst(l);
appendStringInfo(buf, sep);
sep = ", ";
static void
get_update_query_def(Query *query, deparse_context *context)
{
- StringInfo buf = context->buf;
- char *sep;
- RangeTblEntry *rte;
- ListCell *l;
+ StringInfo buf = context->buf;
+ char *sep;
+ RangeTblEntry *rte;
+ ListCell *l;
/*
* Start the query with UPDATE relname SET
foreach(l, query->targetList)
{
TargetEntry *tle = (TargetEntry *) lfirst(l);
- Node *expr;
+ Node *expr;
if (tle->resdom->resjunk)
continue; /* ignore junk entries */
* tle->resname, since resname will fail to track RENAME.
*/
appendStringInfoString(buf,
- quote_identifier(get_relid_attribute_name(rte->relid,
- tle->resdom->resno)));
+ quote_identifier(get_relid_attribute_name(rte->relid,
+ tle->resdom->resno)));
/*
- * Print any indirection needed (subfields or subscripts), and strip
- * off the top-level nodes representing the indirection assignments.
+ * Print any indirection needed (subfields or subscripts), and
+ * strip off the top-level nodes representing the indirection
+ * assignments.
*/
expr = processIndirection((Node *) tle->expr, context);
static RangeTblEntry *
find_rte_by_refname(const char *refname, deparse_context *context)
{
- RangeTblEntry *result = NULL;
- ListCell *nslist;
+ RangeTblEntry *result = NULL;
+ ListCell *nslist;
foreach(nslist, context->namespaces)
{
deparse_namespace *dpns = (deparse_namespace *) lfirst(nslist);
- ListCell *rtlist;
+ ListCell *rtlist;
foreach(rtlist, dpns->rtable)
{
case T_BoolExpr: /* lower precedence */
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
}
case T_ArrayRef: /* other separators */
case T_ArrayExpr: /* other separators */
- case T_RowExpr: /* other separators */
+ case T_RowExpr: /* other separators */
case T_CoalesceExpr: /* own parentheses */
case T_NullIfExpr: /* other separators */
- case T_Aggref: /* own parentheses */
+ case T_Aggref: /* own parentheses */
case T_CaseExpr: /* other separators */
return true;
default:
bool need_parens;
/*
- * Parenthesize the argument unless it's a simple Var or
- * a FieldSelect. (In particular, if it's another ArrayRef,
+ * Parenthesize the argument unless it's a simple Var or a
+ * FieldSelect. (In particular, if it's another ArrayRef,
* we *must* parenthesize to avoid confusion.)
*/
need_parens = !IsA(aref->refexpr, Var) &&
if (need_parens)
appendStringInfoChar(buf, ')');
printSubscripts(aref, context);
+
/*
* Array assignment nodes should have been handled in
* processIndirection().
format_type_be(argType));
fieldname = get_relid_attribute_name(typrelid,
fselect->fieldnum);
+
/*
* Parenthesize the argument unless it's an ArrayRef or
- * another FieldSelect. Note in particular that it would be
- * WRONG to not parenthesize a Var argument; simplicity is not
- * the issue here, having the right number of names is.
+ * another FieldSelect. Note in particular that it would
+ * be WRONG to not parenthesize a Var argument; simplicity
+ * is not the issue here, having the right number of names
+ * is.
*/
need_parens = !IsA(fselect->arg, ArrayRef) &&
!IsA(fselect->arg, FieldSelect);
break;
case T_FieldStore:
+
/*
* We shouldn't see FieldStore here; it should have been
* stripped off by processIndirection().
if (caseexpr->arg)
{
/* Show only the RHS of "CaseTestExpr = RHS" */
- Node *rhs;
+ Node *rhs;
Assert(IsA(when->expr, OpExpr));
rhs = (Node *) lsecond(((OpExpr *) when->expr)->args);
case T_RowExpr:
{
- RowExpr *rowexpr = (RowExpr *) node;
+ RowExpr *rowexpr = (RowExpr *) node;
TupleDesc tupdesc = NULL;
ListCell *arg;
int i;
char *sep;
/*
- * If it's a named type and not RECORD, we may have to skip
- * dropped columns and/or claim there are NULLs for added
- * columns.
+ * If it's a named type and not RECORD, we may have to
+ * skip dropped columns and/or claim there are NULLs for
+ * added columns.
*/
if (rowexpr->row_typeid != RECORDOID)
{
}
/*
- * SQL99 allows "ROW" to be omitted when there is more than
- * one column, but for simplicity we always print it.
+ * SQL99 allows "ROW" to be omitted when there is more
+ * than one column, but for simplicity we always print it.
*/
appendStringInfo(buf, "ROW(");
sep = "";
appendStringInfo(buf, ")");
if (rowexpr->row_format == COERCE_EXPLICIT_CAST)
appendStringInfo(buf, "::%s",
- format_type_with_typemod(rowexpr->row_typeid, -1));
+ format_type_with_typemod(rowexpr->row_typeid, -1));
}
break;
extval = DatumGetCString(OidFunctionCall3(typoutput,
constval->constvalue,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(-1)));
switch (constval->consttype)
if (col != list_head(j->using))
appendStringInfo(buf, ", ");
appendStringInfoString(buf,
- quote_identifier(strVal(lfirst(col))));
+ quote_identifier(strVal(lfirst(col))));
}
appendStringInfoChar(buf, ')');
}
{
StringInfo buf = context->buf;
ListCell *col;
- AttrNumber attnum;
+ AttrNumber attnum;
bool first = true;
if (alias == NULL || alias->colnames == NIL)
elog(ERROR, "cache lookup failed for opclass %u", opclass);
opcrec = (Form_pg_opclass) GETSTRUCT(ht_opc);
- /* Special case for ARRAY_OPS: pretend it is default for any array type */
+ /*
+ * Special case for ARRAY_OPS: pretend it is default for any array
+ * type
+ */
if (OidIsValid(actual_datatype))
{
if (opcrec->opcintype == ANYARRAYOID &&
/* Must force use of opclass name if not in search path */
isvisible = OpclassIsVisible(opclass);
-
+
if (actual_datatype != opcrec->opcintype || !opcrec->opcdefault ||
!isvisible)
{
if (!OidIsValid(typrelid))
elog(ERROR, "argument type %s of FieldStore is not a tuple type",
format_type_be(fstore->resulttype));
+
/*
- * Get the field name. Note we assume here that there's only
+ * Get the field name. Note we assume here that there's only
* one field being assigned to. This is okay in stored rules
- * but could be wrong in executable target lists. Presently no
- * problem since explain.c doesn't print plan targetlists, but
- * someday may have to think of something ...
+ * but could be wrong in executable target lists. Presently
+ * no problem since explain.c doesn't print plan targetlists,
+ * but someday may have to think of something ...
*/
fieldname = get_relid_attribute_name(typrelid,
- linitial_int(fstore->fieldnums));
+ linitial_int(fstore->fieldnums));
appendStringInfo(buf, ".%s", quote_identifier(fieldname));
+
/*
* We ignore arg since it should be an uninteresting reference
* to the target column or subcolumn.
if (aref->refassgnexpr == NULL)
break;
printSubscripts(aref, context);
+
/*
- * We ignore refexpr since it should be an uninteresting reference
- * to the target column or subcolumn.
+ * We ignore refexpr since it should be an uninteresting
+ * reference to the target column or subcolumn.
*/
node = (Node *) aref->refassgnexpr;
}
ListCell *lowlist_item;
ListCell *uplist_item;
- lowlist_item = list_head(aref->reflowerindexpr); /* could be NULL */
+ lowlist_item = list_head(aref->reflowerindexpr); /* could be NULL */
foreach(uplist_item, aref->refupperindexpr)
{
appendStringInfoChar(buf, '[');
static void
print_operator_name(StringInfo buf, List *opname)
{
- ListCell *op = list_head(opname);
- int nnames = list_length(opname);
+ ListCell *op = list_head(opname);
+ int nnames = list_length(opname);
if (nnames == 1)
appendStringInfoString(buf, strVal(lfirst(op)));
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.163 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/selfuncs.c,v 1.164 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static unsigned char *convert_string_datum(Datum value, Oid typid);
static double convert_timevalue_to_scalar(Datum value, Oid typid);
static bool get_restriction_variable(Query *root, List *args, int varRelid,
- VariableStatData *vardata, Node **other,
- bool *varonleft);
+ VariableStatData *vardata, Node **other,
+ bool *varonleft);
static void get_join_variables(Query *root, List *args,
- VariableStatData *vardata1,
- VariableStatData *vardata2);
+ VariableStatData *vardata1,
+ VariableStatData *vardata2);
static void examine_variable(Query *root, Node *node, int varRelid,
- VariableStatData *vardata);
+ VariableStatData *vardata);
static double get_variable_numdistinct(VariableStatData *vardata);
static bool get_variable_maximum(Query *root, VariableStatData *vardata,
- Oid sortop, Datum *max);
+ Oid sortop, Datum *max);
static Selectivity prefix_selectivity(Query *root, VariableStatData *vardata,
Oid opclass, Const *prefix);
static Selectivity pattern_selectivity(Const *patt, Pattern_Type ptype);
double selec;
/*
- * If expression is not variable op something or something op variable,
- * then punt and return a default estimate.
+ * If expression is not variable op something or something op
+ * variable, then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
double selec;
/*
- * If expression is not variable op something or something op variable,
- * then punt and return a default estimate.
+ * If expression is not variable op something or something op
+ * variable, then punt and return a default estimate.
*/
if (!get_restriction_variable(root, args, varRelid,
&vardata, &other, &varonleft))
{
/*
* If we can't get variable statistics for the argument, perhaps
- * clause_selectivity can do something with it. We ignore
- * the possibility of a NULL value when using clause_selectivity,
- * and just assume the value is either TRUE or FALSE.
+ * clause_selectivity can do something with it. We ignore the
+ * possibility of a NULL value when using clause_selectivity, and
+ * just assume the value is either TRUE or FALSE.
*/
switch (booltesttype)
{
case IS_FALSE:
case IS_NOT_TRUE:
selec = 1.0 - (double) clause_selectivity(root, arg,
- varRelid, jointype);
+ varRelid, jointype);
break;
default:
elog(ERROR, "unrecognized booltesttype: %d",
default:
elog(ERROR, "unrecognized nulltesttype: %d",
(int) nulltesttype);
- return (Selectivity) 0; /* keep compiler quiet */
+ return (Selectivity) 0; /* keep compiler quiet */
}
}
{
/*
* We have most-common-value lists for both relations. Run
- * through the lists to see which MCVs actually join to each
- * other with the given operator. This allows us to determine
- * the exact join selectivity for the portion of the relations
- * represented by the MCV lists. We still have to estimate
- * for the remaining population, but in a skewed distribution
- * this gives us a big leg up in accuracy. For motivation see
- * the analysis in Y. Ioannidis and S. Christodoulakis, "On
- * the propagation of errors in the size of join results",
- * Technical Report 1018, Computer Science Dept., University
- * of Wisconsin, Madison, March 1991 (available from
- * ftp.cs.wisc.edu).
+ * through the lists to see which MCVs actually join to each other
+ * with the given operator. This allows us to determine the exact
+ * join selectivity for the portion of the relations represented
+ * by the MCV lists. We still have to estimate for the remaining
+ * population, but in a skewed distribution this gives us a big
+ * leg up in accuracy. For motivation see the analysis in Y.
+ * Ioannidis and S. Christodoulakis, "On the propagation of errors
+ * in the size of join results", Technical Report 1018, Computer
+ * Science Dept., University of Wisconsin, Madison, March 1991
+ * (available from ftp.cs.wisc.edu).
*/
FmgrInfo eqproc;
bool *hasmatch1;
hasmatch2 = (bool *) palloc0(nvalues2 * sizeof(bool));
/*
- * If we are doing any variant of JOIN_IN, pretend all the
- * values of the righthand relation are unique (ie, act as if
- * it's been DISTINCT'd).
+ * If we are doing any variant of JOIN_IN, pretend all the values
+ * of the righthand relation are unique (ie, act as if it's been
+ * DISTINCT'd).
*
- * NOTE: it might seem that we should unique-ify the lefthand
- * input when considering JOIN_REVERSE_IN. But this is not
- * so, because the join clause we've been handed has not been
- * commuted from the way the parser originally wrote it. We
- * know that the unique side of the IN clause is *always* on
- * the right.
+ * NOTE: it might seem that we should unique-ify the lefthand input
+ * when considering JOIN_REVERSE_IN. But this is not so, because
+ * the join clause we've been handed has not been commuted from
+ * the way the parser originally wrote it. We know that the
+ * unique side of the IN clause is *always* on the right.
*
- * NOTE: it would be dangerous to try to be smart about JOIN_LEFT
- * or JOIN_RIGHT here, because we do not have enough
- * information to determine which var is really on which side
- * of the join. Perhaps someday we should pass in more
- * information.
+ * NOTE: it would be dangerous to try to be smart about JOIN_LEFT or
+ * JOIN_RIGHT here, because we do not have enough information to
+ * determine which var is really on which side of the join.
+ * Perhaps someday we should pass in more information.
*/
if (jointype == JOIN_IN ||
jointype == JOIN_REVERSE_IN ||
}
/*
- * Note we assume that each MCV will match at most one member
- * of the other MCV list. If the operator isn't really
- * equality, there could be multiple matches --- but we don't
- * look for them, both for speed and because the math wouldn't
- * add up...
+ * Note we assume that each MCV will match at most one member of
+ * the other MCV list. If the operator isn't really equality,
+ * there could be multiple matches --- but we don't look for them,
+ * both for speed and because the math wouldn't add up...
*/
matchprodfreq = 0.0;
nmatches = 0;
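/*
 * Illustrative standalone sketch (not the backend code) of the MCV
 * cross-matching described in the comment above: run the two
 * most-common-value lists against each other with the join operator
 * (plain integer equality here), accumulate the joint frequency of
 * matched pairs, and assume each MCV matches at most one member of the
 * other list.  The value/frequency arrays are hypothetical statistics.
 */
#include <stdbool.h>
#include <stdio.h>

int
main(void)
{
	int			values1[] = {1, 2, 3, 5};
	double		freqs1[] = {0.30, 0.20, 0.10, 0.05};
	int			values2[] = {2, 3, 4};
	double		freqs2[] = {0.25, 0.15, 0.10};
	int			nvalues1 = 4,
				nvalues2 = 3;
	bool		hasmatch1[4] = {false};
	bool		hasmatch2[3] = {false};
	double		matchprodfreq = 0.0;
	double		matchfreq1 = 0.0;
	int			nmatches = 0;

	for (int i = 0; i < nvalues1; i++)
	{
		for (int j = 0; j < nvalues2; j++)
		{
			if (hasmatch2[j])
				continue;		/* each MCV matches at most once */
			if (values1[i] == values2[j])	/* stand-in for the join operator */
			{
				hasmatch1[i] = true;
				hasmatch2[j] = true;
				matchprodfreq += freqs1[i] * freqs2[j];
				nmatches++;
				break;
			}
		}
	}

	/* frequency of side-1 MCVs that found a partner */
	for (int i = 0; i < nvalues1; i++)
		if (hasmatch1[i])
			matchfreq1 += freqs1[i];

	printf("matched MCV pairs = %d, joint selectivity from MCVs = %g\n",
		   nmatches, matchprodfreq);
	printf("side-1 MCV frequency matched = %g\n", matchfreq1);
	return 0;
}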
pfree(hasmatch2);
/*
- * Compute total frequency of non-null values that are not in
- * the MCV lists.
+ * Compute total frequency of non-null values that are not in the
+ * MCV lists.
*/
otherfreq1 = 1.0 - nullfrac1 - matchfreq1 - unmatchfreq1;
otherfreq2 = 1.0 - nullfrac2 - matchfreq2 - unmatchfreq2;
CLAMP_PROBABILITY(otherfreq2);
/*
- * We can estimate the total selectivity from the point of
- * view of relation 1 as: the known selectivity for matched
- * MCVs, plus unmatched MCVs that are assumed to match against
- * random members of relation 2's non-MCV population, plus
- * non-MCV values that are assumed to match against random
- * members of relation 2's unmatched MCVs plus non-MCV values.
+ * We can estimate the total selectivity from the point of view of
+ * relation 1 as: the known selectivity for matched MCVs, plus
+ * unmatched MCVs that are assumed to match against random members
+ * of relation 2's non-MCV population, plus non-MCV values that
+ * are assumed to match against random members of relation 2's
+ * unmatched MCVs plus non-MCV values.
*/
totalsel1 = matchprodfreq;
if (nd2 > nvalues2)
(nd1 - nmatches);
/*
- * Use the smaller of the two estimates. This can be
- * justified in essentially the same terms as given below for
- * the no-stats case: to a first approximation, we are
- * estimating from the point of view of the relation with
- * smaller nd.
+ * Use the smaller of the two estimates. This can be justified in
+ * essentially the same terms as given below for the no-stats
+ * case: to a first approximation, we are estimating from the
+ * point of view of the relation with smaller nd.
*/
selec = (totalsel1 < totalsel2) ? totalsel1 : totalsel2;
}
{
/*
* We do not have MCV lists for both sides. Estimate the join
- * selectivity as
- * MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2). This is
- * plausible if we assume that the join operator is strict and
- * the non-null values are about equally distributed: a given
+ * selectivity as MIN(1/nd1,1/nd2)*(1-nullfrac1)*(1-nullfrac2).
+ * This is plausible if we assume that the join operator is strict
+ * and the non-null values are about equally distributed: a given
* non-null tuple of rel1 will join to either zero or
- * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are
- * at most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
- * selectivity of not more than
- * (1-nullfrac1)*(1-nullfrac2)/nd2. By the same logic it is
- * not more than (1-nullfrac1)*(1-nullfrac2)/nd1, so the
- * expression with MIN() is an upper bound. Using the MIN()
- * means we estimate from the point of view of the relation
- * with smaller nd (since the larger nd is determining the
- * MIN). It is reasonable to assume that most tuples in this
- * rel will have join partners, so the bound is probably
- * reasonably tight and should be taken as-is.
+ * N2*(1-nullfrac2)/nd2 rows of rel2, so total join rows are at
+ * most N1*(1-nullfrac1)*N2*(1-nullfrac2)/nd2 giving a join
+ * selectivity of not more than (1-nullfrac1)*(1-nullfrac2)/nd2.
+ * By the same logic it is not more than
+ * (1-nullfrac1)*(1-nullfrac2)/nd1, so the expression with MIN()
+ * is an upper bound. Using the MIN() means we estimate from the
+ * point of view of the relation with smaller nd (since the larger
+ * nd is determining the MIN). It is reasonable to assume that
+ * most tuples in this rel will have join partners, so the bound
+ * is probably reasonably tight and should be taken as-is.
*
- * XXX Can we be smarter if we have an MCV list for just one
- * side? It seems that if we assume equal distribution for the
- * other side, we end up with the same answer anyway.
+ * XXX Can we be smarter if we have an MCV list for just one side? It
+ * seems that if we assume equal distribution for the other side,
+ * we end up with the same answer anyway.
*/
double nullfrac1 = stats1 ? stats1->stanullfrac : 0.0;
double nullfrac2 = stats2 ? stats2->stanullfrac : 0.0;
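/*
 * Standalone sketch of the no-MCV estimate described in the comment above:
 * selectivity = MIN(1/nd1, 1/nd2) * (1 - nullfrac1) * (1 - nullfrac2).
 * The nd and nullfrac inputs are hypothetical, not real statistics.
 */
#include <stdio.h>

int
main(void)
{
	double		nd1 = 1000.0;	/* distinct non-null values on side 1 */
	double		nd2 = 50.0;		/* distinct non-null values on side 2 */
	double		nullfrac1 = 0.10;
	double		nullfrac2 = 0.00;
	double		selec;

	/* MIN(1/nd1, 1/nd2) is 1 over the larger of the two nd values */
	selec = 1.0 / ((nd1 > nd2) ? nd1 : nd2);
	selec *= (1.0 - nullfrac1) * (1.0 - nullfrac2);

	/* here: (1/1000) * 0.9 * 1.0 = 0.0009 */
	printf("estimated join selectivity = %g\n", selec);
	return 0;
}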
right = (Node *) lsecond(args);
/*
- * Examine both sides. Note that when varRelid is nonzero, Vars of
+ * Examine both sides. Note that when varRelid is nonzero, Vars of
* other relations will be treated as pseudoconstants.
*/
examine_variable(root, left, varRelid, vardata);
{
vardata->statsTuple = SearchSysCache(STATRELATT,
ObjectIdGetDatum(relid),
- Int16GetDatum(var->varattno),
+ Int16GetDatum(var->varattno),
0, 0);
}
else
{
/*
- * XXX This means the Var comes from a JOIN or sub-SELECT. Later
- * add code to dig down into the join etc and see if we can trace
- * the variable to something with stats. (But beware of
- * sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps there are
- * no cases where this would really be useful, because we'd have
- * flattened the subselect if it is??)
+ * XXX This means the Var comes from a JOIN or sub-SELECT.
+ * Later add code to dig down into the join etc and see if we
+ * can trace the variable to something with stats. (But
+ * beware of sub-SELECTs with DISTINCT/GROUP BY/etc. Perhaps
+ * there are no cases where this would really be useful,
+ * because we'd have flattened the subselect if it is??)
*/
}
/*
* Okay, it's a more complicated expression. Determine variable
- * membership. Note that when varRelid isn't zero, only vars of
- * that relation are considered "real" vars.
+ * membership. Note that when varRelid isn't zero, only vars of that
+ * relation are considered "real" vars.
*/
varnos = pull_varnos(node);
if (varRelid == 0 || bms_is_member(varRelid, varnos))
{
onerel = find_base_rel(root,
- (varRelid ? varRelid : bms_singleton_member(varnos)));
+ (varRelid ? varRelid : bms_singleton_member(varnos)));
vardata->rel = onerel;
}
/* else treat it as a constant */
if (onerel)
{
/*
- * We have an expression in vars of a single relation. Try to
+ * We have an expression in vars of a single relation. Try to
* match it to expressional index columns, in hopes of finding
* some statistics.
*
* XXX it's conceivable that there are multiple matches with
* different index opclasses; if so, we need to pick one that
- * matches the operator we are estimating for. FIXME later.
+ * matches the operator we are estimating for. FIXME later.
*/
ListCell *ilist;
if (equal(node, indexkey))
{
/*
- * Found a match ... is it a unique index?
- * Tests here should match has_unique_index().
+ * Found a match ... is it a unique index? Tests
+ * here should match has_unique_index().
*/
if (index->unique &&
index->ncolumns == 1 &&
vardata->isunique = true;
/* Has it got stats? */
vardata->statsTuple = SearchSysCache(STATRELATT,
- ObjectIdGetDatum(index->indexoid),
- Int16GetDatum(pos + 1),
+ ObjectIdGetDatum(index->indexoid),
+ Int16GetDatum(pos + 1),
0, 0);
if (vardata->statsTuple)
break;
double ntuples;
/*
- * Determine the stadistinct value to use. There are cases where
- * we can get an estimate even without a pg_statistic entry, or
- * can get a better value than is in pg_statistic.
+ * Determine the stadistinct value to use. There are cases where we
+ * can get an estimate even without a pg_statistic entry, or can get a
+ * better value than is in pg_statistic.
*/
if (HeapTupleIsValid(vardata->statsTuple))
{
/*
* Special-case boolean columns: presumably, two distinct values.
*
- * Are there any other datatypes we should wire in special
- * estimates for?
+ * Are there any other datatypes we should wire in special estimates
+ * for?
*/
stadistinct = 2.0;
}
else
{
/*
- * We don't keep statistics for system columns, but in some
- * cases we can infer distinctness anyway.
+ * We don't keep statistics for system columns, but in some cases
+ * we can infer distinctness anyway.
*/
if (vardata->var && IsA(vardata->var, Var))
{
{
case ObjectIdAttributeNumber:
case SelfItemPointerAttributeNumber:
- stadistinct = -1.0; /* unique */
+ stadistinct = -1.0; /* unique */
break;
case TableOidAttributeNumber:
- stadistinct = 1.0; /* only 1 value */
+ stadistinct = 1.0; /* only 1 value */
break;
default:
- stadistinct = 0.0; /* means "unknown" */
+ stadistinct = 0.0; /* means "unknown" */
break;
}
}
else
- stadistinct = 0.0; /* means "unknown" */
+ stadistinct = 0.0; /* means "unknown" */
+
/*
* XXX consider using estimate_num_groups on expressions?
*/
}
/*
- * If there is a unique index for the variable, assume it is unique
- * no matter what pg_statistic says (the statistics could be out
- * of date). Can skip search if we already think it's unique.
+ * If there is a unique index for the variable, assume it is unique no
+ * matter what pg_statistic says (the statistics could be out of
+ * date). Can skip search if we already think it's unique.
*/
if (stadistinct != -1.0)
{
stadistinct = -1.0;
else if (vardata->var && IsA(vardata->var, Var) &&
vardata->rel &&
- has_unique_index(vardata->rel,
+ has_unique_index(vardata->rel,
((Var *) vardata->var)->varattno))
stadistinct = -1.0;
}
}
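/*
 * Standalone sketch of turning the stadistinct conventions shown above
 * into a distinct-value estimate, under the usual pg_statistic
 * interpretation (stated here as an assumption, not taken from this diff):
 * a positive value is an absolute count, a negative value is minus the
 * fraction of rows that are distinct (so -1.0 means unique), and 0 means
 * unknown.
 */
#include <stdio.h>

static double
estimate_numdistinct(double stadistinct, double ntuples)
{
	if (stadistinct > 0.0)
		return stadistinct;		/* absolute number of distinct values */
	if (stadistinct < 0.0)
		return -stadistinct * ntuples;	/* scales with the table size */
	return 200.0;				/* unknown: fall back to an arbitrary default */
}

int
main(void)
{
	double		ntuples = 10000.0;	/* hypothetical table size */

	printf("unique column (stadistinct -1.0): %g\n",
		   estimate_numdistinct(-1.0, ntuples));
	printf("single-valued (stadistinct  1.0): %g\n",
		   estimate_numdistinct(1.0, ntuples));
	printf("unknown       (stadistinct  0.0): %g\n",
		   estimate_numdistinct(0.0, ntuples));
	return 0;
}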
else
{
- bytea *bstr = DatumGetByteaP(patt_const->constvalue);
+ bytea *bstr = DatumGetByteaP(patt_const->constvalue);
pattlen = VARSIZE(bstr) - VARHDRSZ;
if (pattlen > 0)
}
else
{
- bytea *bstr = DatumGetByteaP(patt_const->constvalue);
+ bytea *bstr = DatumGetByteaP(patt_const->constvalue);
pattlen = VARSIZE(bstr) - VARHDRSZ;
if (pattlen > 0)
if (datatype == NAMEOID)
{
workstr = DatumGetCString(DirectFunctionCall1(nameout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
else if (datatype == BYTEAOID)
{
- bytea *bstr = DatumGetByteaP(str_const->constvalue);
+ bytea *bstr = DatumGetByteaP(str_const->constvalue);
len = VARSIZE(bstr) - VARHDRSZ;
if (len > 0)
else
{
workstr = DatumGetCString(DirectFunctionCall1(textout,
- str_const->constvalue));
+ str_const->constvalue));
len = strlen(workstr);
}
static Const *
string_to_bytea_const(const char *str, size_t str_len)
{
- bytea *bstr = palloc(VARHDRSZ + str_len);
- Datum conval;
+ bytea *bstr = palloc(VARHDRSZ + str_len);
+ Datum conval;
memcpy(VARDATA(bstr), str, str_len);
VARATT_SIZEP(bstr) = VARHDRSZ + str_len;
/*
* If the index is partial, AND the index predicate with the
* explicitly given indexquals to produce a more accurate idea of the
- * index selectivity. This may produce redundant clauses. We get rid
- * of exact duplicates in the code below. We expect that most
- * cases of partial redundancy (such as "x < 4" from the qual and
- * "x < 5" from the predicate) will be recognized and handled correctly
- * by clauselist_selectivity(). This assumption is somewhat fragile,
+ * index selectivity. This may produce redundant clauses. We get rid
+ * of exact duplicates in the code below. We expect that most cases
+ * of partial redundancy (such as "x < 4" from the qual and "x < 5"
+ * from the predicate) will be recognized and handled correctly by
+ * clauselist_selectivity(). This assumption is somewhat fragile,
* since it depends on pred_test() and clauselist_selectivity() having
- * similar capabilities, and there are certainly many cases where we will
- * end up with a too-low selectivity estimate. This will bias the system
- * in favor of using partial indexes where possible, which is not
- * necessarily a bad thing. But it'd be nice to do better someday.
+ * similar capabilities, and there are certainly many cases where we
+ * will end up with a too-low selectivity estimate. This will bias
+ * the system in favor of using partial indexes where possible, which
+ * is not necessarily a bad thing. But it'd be nice to do better
+ * someday.
*
* Note that index->indpred and indexQuals are both in implicit-AND form,
* so ANDing them together just takes merging the lists. However,
- * eliminating duplicates is a bit trickier because indexQuals contains
- * RestrictInfo nodes and the indpred does not. It is okay to pass a
- * mixed list to clauselist_selectivity, but we have to work a bit to
- * generate a list without logical duplicates. (We could just list_union
- * indpred and strippedQuals, but then we'd not get caching of per-qual
- * selectivity estimates.)
+ * eliminating duplicates is a bit trickier because indexQuals
+ * contains RestrictInfo nodes and the indpred does not. It is okay
+ * to pass a mixed list to clauselist_selectivity, but we have to work
+ * a bit to generate a list without logical duplicates. (We could
+ * just list_union indpred and strippedQuals, but then we'd not get
+ * caching of per-qual selectivity estimates.)
*/
if (index->indpred != NIL)
{
- List *strippedQuals;
- List *predExtraQuals;
+ List *strippedQuals;
+ List *predExtraQuals;
strippedQuals = get_actual_clauses(indexQuals);
predExtraQuals = list_difference(index->indpred, strippedQuals);
/*
* Compute the index access cost.
*
- * Disk cost: our generic assumption is that the index pages will be
- * read sequentially, so they have cost 1.0 each, not random_page_cost.
+ * Disk cost: our generic assumption is that the index pages will be read
+ * sequentially, so they have cost 1.0 each, not random_page_cost.
*/
*indexTotalCost = numIndexPages;
/*
- * CPU cost: any complex expressions in the indexquals will need to
- * be evaluated once at the start of the scan to reduce them to runtime
- * keys to pass to the index AM (see nodeIndexscan.c). We model the
- * per-tuple CPU costs as cpu_index_tuple_cost plus one cpu_operator_cost
- * per indexqual operator.
+ * CPU cost: any complex expressions in the indexquals will need to be
+ * evaluated once at the start of the scan to reduce them to runtime
+ * keys to pass to the index AM (see nodeIndexscan.c). We model the
+ * per-tuple CPU costs as cpu_index_tuple_cost plus one
+ * cpu_operator_cost per indexqual operator.
*
* Note: this neglects the possible costs of rechecking lossy operators
- * and OR-clause expressions. Detecting that that might be needed seems
- * more expensive than it's worth, though, considering all the other
- * inaccuracies here ...
+ * and OR-clause expressions. Detecting that that might be needed
+ * seems more expensive than it's worth, though, considering all the
+ * other inaccuracies here ...
*/
cost_qual_eval(&index_qual_cost, indexQuals);
qual_op_cost = cpu_operator_cost * list_length(indexQuals);
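/*
 * Standalone arithmetic sketch of the cost model described above: 1.0 per
 * index page read (sequential, not random_page_cost), plus a per-tuple CPU
 * charge of cpu_index_tuple_cost plus one cpu_operator_cost per indexqual
 * operator.  The page/tuple counts are hypothetical and the cost constants
 * are assumed defaults, not values taken from this diff.
 */
#include <stdio.h>

int
main(void)
{
	double		cpu_index_tuple_cost = 0.005;	/* assumed default setting */
	double		cpu_operator_cost = 0.0025;		/* assumed default setting */
	double		numIndexPages = 30.0;	/* index pages expected to be read */
	double		numIndexTuples = 500.0; /* index tuples expected to be visited */
	int			nindexquals = 2;		/* indexqual operators */
	double		indexTotalCost;

	indexTotalCost = numIndexPages;		/* disk cost */
	indexTotalCost += numIndexTuples *
		(cpu_index_tuple_cost + cpu_operator_cost * nindexquals);

	printf("estimated index access cost = %g\n", indexTotalCost);
	return 0;
}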
indexSelectivity, indexCorrelation);
/*
- * If we can get an estimate of the first column's ordering correlation C
- * from pg_statistic, estimate the index correlation as C for a single-
- * column index, or C * 0.75 for multiple columns. (The idea here is
- * that multiple columns dilute the importance of the first column's
- * ordering, but don't negate it entirely. Before 8.0 we divided the
- * correlation by the number of columns, but that seems too strong.)
+ * If we can get an estimate of the first column's ordering
+ * correlation C from pg_statistic, estimate the index correlation as
+ * C for a single- column index, or C * 0.75 for multiple columns.
+ * (The idea here is that multiple columns dilute the importance of
+ * the first column's ordering, but don't negate it entirely. Before
+ * 8.0 we divided the correlation by the number of columns, but that
+ * seems too strong.)
*/
if (index->indexkeys[0] != 0)
{
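/*
 * Standalone sketch of the correlation rule quoted above: use the first
 * column's ordering correlation C as-is for a single-column index, and
 * C * 0.75 when the index has more than one column.  Inputs are
 * hypothetical.
 */
#include <stdio.h>

static double
index_correlation(double varCorrelation, int ncolumns)
{
	return (ncolumns > 1) ? varCorrelation * 0.75 : varCorrelation;
}

int
main(void)
{
	printf("one column,    C = 0.9: %g\n", index_correlation(0.9, 1));
	printf("three columns, C = 0.9: %g\n", index_correlation(0.9, 3));
	return 0;
}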
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.46 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/tid.c,v 1.47 2004/08/29 05:06:49 momjian Exp $
*
* NOTES
* input routine largely stolen from boxin().
if (list_length(rewrite->actions) != 1)
elog(ERROR, "only one select rule is allowed in views");
query = (Query *) linitial(rewrite->actions);
- tle = get_tle_by_resno(query->targetList, tididx+1);
+ tle = get_tle_by_resno(query->targetList, tididx + 1);
if (tle && tle->expr && IsA(tle->expr, Var))
{
Var *var = (Var *) tle->expr;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.111 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/timestamp.c,v 1.112 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int32 typmod = PG_GETARG_INT32(2);
Timestamp result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
int dtype;
{
Timestamp timestamp = PG_GETARG_TIMESTAMP(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char *tzn = NULL;
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
Timestamp timestamp;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
/* rangecheck: see if timestamp_out would like it */
if (TIMESTAMP_NOT_FINITE(timestamp))
- /* ok */;
+ /* ok */ ;
else if (timestamp2tm(timestamp, NULL, tm, &fsec, NULL) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
int32 typmod = PG_GETARG_INT32(2);
TimestampTz result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int tz;
int dtype;
TimestampTz dt = PG_GETARG_TIMESTAMPTZ(0);
char *result;
int tz;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char *tzn;
timestamptz_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
- TimestampTz timestamp;
+ TimestampTz timestamp;
int tz;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char *tzn;
/* rangecheck: see if timestamptz_out would like it */
if (TIMESTAMP_NOT_FINITE(timestamp))
- /* ok */;
+ /* ok */ ;
else if (timestamp2tm(timestamp, &tz, tm, &fsec, &tzn) != 0)
ereport(ERROR,
(errcode(ERRCODE_DATETIME_VALUE_OUT_OF_RANGE),
int32 typmod = PG_GETARG_INT32(2);
Interval *result;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
int dtype;
int nf;
{
Interval *span = PG_GETARG_INTERVAL_P(0);
char *result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char buf[MAXDATELEN + 1];
* -1 on out of range
*/
int
-timestamp2tm(Timestamp dt, int *tzp, struct pg_tm *tm, fsec_t *fsec, char **tzn)
+timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm, fsec_t *fsec, char **tzn)
{
Timestamp date;
Timestamp time;
*
* First, convert to an integral timestamp, avoiding possibly
* platform-specific roundoff-in-wrong-direction errors, and adjust to
- * Unix epoch. Then see if we can convert to pg_time_t without loss.
+ * Unix epoch. Then see if we can convert to pg_time_t without loss.
* This coding avoids hardwiring any assumptions about the width of
* pg_time_t, so it should behave sanely on machines without int64.
*/
utime = (pg_time_t) dt;
if ((Timestamp) utime == dt)
{
- struct pg_tm *tx = pg_localtime(&utime);
+ struct pg_tm *tx = pg_localtime(&utime);
tm->tm_year = tx->tm_year + 1900;
tm->tm_mon = tx->tm_mon + 1;
#ifdef HAVE_INT64_TIMESTAMP
int date;
int64 time;
+
#else
double date,
time;
{
#ifdef HAVE_INT64_TIMESTAMP
int64 time;
+
#else
double time;
#endif
void
GetEpochTime(struct pg_tm * tm)
{
- struct pg_tm *t0;
- pg_time_t epoch = 0;
+ struct pg_tm *t0;
+ pg_time_t epoch = 0;
t0 = pg_gmtime(&epoch);
SetEpochTimestamp(void)
{
Timestamp dt;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
GetEpochTime(tm);
timestamp_eq_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_ne_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_lt_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_gt_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_le_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_ge_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
timestamp_cmp_timestamptz(PG_FUNCTION_ARGS)
{
Timestamp timestampVal = PG_GETARG_TIMESTAMP(0);
- TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
- TimestampTz dt1;
+ TimestampTz dt2 = PG_GETARG_TIMESTAMPTZ(1);
+ TimestampTz dt1;
dt1 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_eq_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_ne_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_lt_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_gt_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_le_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_ge_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
Datum
timestamptz_cmp_timestamp(PG_FUNCTION_ARGS)
{
- TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
+ TimestampTz dt1 = PG_GETARG_TIMESTAMPTZ(0);
Timestamp timestampVal = PG_GETARG_TIMESTAMP(1);
- TimestampTz dt2;
+ TimestampTz dt2;
dt2 = timestamp2timestamptz(timestampVal);
{
if (span->month != 0)
{
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
{
if (span->month != 0)
{
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
fsec_t fsec,
fsec1,
fsec2;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
- struct pg_tm tt1,
+ struct pg_tm tt1,
*tm1 = &tt1;
- struct pg_tm tt2,
+ struct pg_tm tt2,
*tm2 = &tt2;
result = (Interval *) palloc(sizeof(Interval));
fsec_t fsec,
fsec1,
fsec2;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
- struct pg_tm tt1,
+ struct pg_tm tt1,
*tm1 = &tt1;
- struct pg_tm tt2,
+ struct pg_tm tt2,
*tm2 = &tt2;
result = (Interval *) palloc(sizeof(Interval));
val;
char *lowunits;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_NOT_FINITE(timestamp))
switch (val)
{
case DTK_WEEK:
- isoweek2date( date2isoweek( tm->tm_year, tm->tm_mon, tm->tm_mday ), &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday) );
+ isoweek2date(date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday), &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
case DTK_MILLENNIUM:
/* see comments in timestamptz_trunc */
if (tm->tm_year > 0)
- tm->tm_year = ((tm->tm_year+999) / 1000) * 1000 - 999;
+ tm->tm_year = ((tm->tm_year + 999) / 1000) * 1000 - 999;
else
- tm->tm_year = - ((999 - (tm->tm_year-1))/1000) * 1000 + 1;
+ tm->tm_year = -((999 - (tm->tm_year - 1)) / 1000) * 1000 + 1;
case DTK_CENTURY:
/* see comments in timestamptz_trunc */
if (tm->tm_year > 0)
- tm->tm_year = ((tm->tm_year+99) / 100) * 100 - 99;
+ tm->tm_year = ((tm->tm_year + 99) / 100) * 100 - 99;
else
- tm->tm_year = - ((99 - (tm->tm_year-1))/100) * 100 + 1;
+ tm->tm_year = -((99 - (tm->tm_year - 1)) / 100) * 100 + 1;
case DTK_DECADE:
/* see comments in timestamptz_trunc */
if (val != DTK_MILLENNIUM && val != DTK_CENTURY)
if (tm->tm_year > 0)
tm->tm_year = (tm->tm_year / 10) * 10;
else
- tm->tm_year = - ((8-(tm->tm_year-1)) / 10) * 10;
+ tm->tm_year = -((8 - (tm->tm_year - 1)) / 10) * 10;
}
case DTK_YEAR:
tm->tm_mon = 1;
char *lowunits;
fsec_t fsec;
char *tzn;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_NOT_FINITE(timestamp))
switch (val)
{
case DTK_WEEK:
- isoweek2date( date2isoweek( tm->tm_year, tm->tm_mon, tm->tm_mday ), &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday) );
+ isoweek2date(date2isoweek(tm->tm_year, tm->tm_mon, tm->tm_mday), &(tm->tm_year), &(tm->tm_mon), &(tm->tm_mday));
tm->tm_hour = 0;
tm->tm_min = 0;
tm->tm_sec = 0;
break;
/* one may consider DTK_THOUSAND and DTK_HUNDRED... */
case DTK_MILLENNIUM:
- /* truncating to the millennium? what is this supposed to mean?
- * let us put the first year of the millennium...
+
+ /*
+ * truncating to the millennium? what is this supposed to
+ * mean? let us put the first year of the millennium...
* i.e. -1000, 1, 1001, 2001...
*/
if (tm->tm_year > 0)
- tm->tm_year = ((tm->tm_year+999) / 1000) * 1000 - 999;
+ tm->tm_year = ((tm->tm_year + 999) / 1000) * 1000 - 999;
else
- tm->tm_year = - ((999 - (tm->tm_year-1))/1000) * 1000 + 1;
+ tm->tm_year = -((999 - (tm->tm_year - 1)) / 1000) * 1000 + 1;
case DTK_CENTURY:
/* truncating to the century? as above: -100, 1, 101... */
if (tm->tm_year > 0)
- tm->tm_year = ((tm->tm_year+99) / 100) * 100 - 99 ;
+ tm->tm_year = ((tm->tm_year + 99) / 100) * 100 - 99;
else
- tm->tm_year = - ((99 - (tm->tm_year-1))/100) * 100 + 1;
+ tm->tm_year = -((99 - (tm->tm_year - 1)) / 100) * 100 + 1;
case DTK_DECADE:
- /* truncating to the decade? first year of the decade.
+
+ /*
+ * truncating to the decade? first year of the decade.
* must not be applied if year was truncated before!
*/
if (val != DTK_MILLENNIUM && val != DTK_CENTURY)
if (tm->tm_year > 0)
tm->tm_year = (tm->tm_year / 10) * 10;
else
- tm->tm_year = - ((8-(tm->tm_year-1)) / 10) * 10;
+ tm->tm_year = -((8 - (tm->tm_year - 1)) / 10) * 10;
}
case DTK_YEAR:
tm->tm_mon = 1;
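/*
 * Standalone sketch of the truncation arithmetic above: map a year to the
 * first year of its decade, century, or millennium, keeping the BC branch
 * exactly as written in the diff (there is no year 0; 1 BC is year -1).
 */
#include <stdio.h>

static int
trunc_millennium(int year)
{
	if (year > 0)
		return ((year + 999) / 1000) * 1000 - 999;
	return -((999 - (year - 1)) / 1000) * 1000 + 1;
}

static int
trunc_century(int year)
{
	if (year > 0)
		return ((year + 99) / 100) * 100 - 99;
	return -((99 - (year - 1)) / 100) * 100 + 1;
}

static int
trunc_decade(int year)
{
	if (year > 0)
		return (year / 10) * 10;
	return -((8 - (year - 1)) / 10) * 10;
}

int
main(void)
{
	/* millennia begin at ... 1, 1001, 2001 ... per the comment above */
	printf("millennium(2004) = %d\n", trunc_millennium(2004));	/* 2001 */
	printf("millennium(2000) = %d\n", trunc_millennium(2000));	/* 1001 */
	printf("century(2000)    = %d\n", trunc_century(2000));	/* 1901 */
	printf("century(2001)    = %d\n", trunc_century(2001));	/* 2001 */
	printf("decade(1999)     = %d\n", trunc_decade(1999));		/* 1990 */
	printf("decade(-5)       = %d\n", trunc_decade(-5));		/* BC branch */
	return 0;
}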
val;
char *lowunits;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
result = (Interval *) palloc(sizeof(Interval));
if (!*year)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot calculate week number without year information")));
+ errmsg("cannot calculate week number without year information")));
/* fourth day of current year */
day4 = date2j(*year, 1, 4);
int
date2isoyear(int year, int mon, int mday)
{
- float8 result;
- int day0,
- day4,
- dayn;
+ float8 result;
+ int day0,
+ day4,
+ dayn;
/* current day */
dayn = date2j(year, mon, mday);
day0 = j2day(day4 - 1);
if (dayn >= (day4 - day0))
- {
year++;
- }
}
return year;
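/*
 * Standalone cross-check for the ISO-8601 week logic above (week 1 is the
 * week containing January 4, so the ISO year can differ from the calendar
 * year near year boundaries).  This relies on C99/POSIX strftime %G (ISO
 * year) and %V (ISO week), which may not be available on every platform.
 */
#include <stdio.h>
#include <string.h>
#include <time.h>

static void
print_iso(int year, int mon, int mday)
{
	struct tm	tm;
	char		buf[32];

	memset(&tm, 0, sizeof(tm));
	tm.tm_year = year - 1900;
	tm.tm_mon = mon - 1;
	tm.tm_mday = mday;
	tm.tm_hour = 12;			/* stay clear of DST transitions */
	tm.tm_isdst = -1;

	if (mktime(&tm) == (time_t) -1)
	{
		printf("%04d-%02d-%02d: out of range for mktime\n", year, mon, mday);
		return;
	}
	strftime(buf, sizeof(buf), "%G-W%V", &tm);
	printf("%04d-%02d-%02d -> ISO %s\n", year, mon, mday, buf);
}

int
main(void)
{
	print_iso(2005, 1, 1);		/* ISO year 2004, week 53 */
	print_iso(2004, 1, 1);		/* ISO year 2004, week 1 */
	print_iso(2004, 12, 31);	/* ISO year 2004, week 53 */
	return 0;
}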
val;
char *lowunits;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_NOT_FINITE(timestamp))
if (tm->tm_year > 0)
result = tm->tm_year;
else
- /* there is no year 0, just 1 BC and 1 AD*/
- result = tm->tm_year - 1;
+ /* there is no year 0, just 1 BC and 1 AD */
+ result = tm->tm_year - 1;
break;
case DTK_DECADE:
- /* what is a decade wrt dates?
- * let us assume that decade 199 is 1990 thru 1999...
- * decade 0 starts on year 1 BC, and -1 is 11 BC thru 2 BC...
+
+ /*
+ * what is a decade wrt dates? let us assume that decade
+ * 199 is 1990 thru 1999... decade 0 starts on year 1 BC,
+ * and -1 is 11 BC thru 2 BC...
*/
- if (tm->tm_year>=0)
+ if (tm->tm_year >= 0)
result = (tm->tm_year / 10);
else
- result = -((8-(tm->tm_year-1)) / 10);
+ result = -((8 - (tm->tm_year - 1)) / 10);
break;
case DTK_CENTURY:
- /* centuries AD, c>0: year in [ (c-1)*100+1 : c*100 ]
- * centuries BC, c<0: year in [ c*100 : (c+1)*100-1 ]
- * there is no number 0 century.
+
+ /*
+ * centuries AD, c>0: year in [ (c-1)*100+1 : c*100
+ * ] centuries BC, c<0: year in [ c*100 :
+ * (c+1)*100-1 ] there is no number 0 century.
*/
if (tm->tm_year > 0)
- result = ((tm->tm_year+99) / 100);
+ result = ((tm->tm_year + 99) / 100);
else
/* caution: C division may have negative remainder */
- result = - ((99 - (tm->tm_year-1))/100);
+ result = -((99 - (tm->tm_year - 1)) / 100);
break;
case DTK_MILLENNIUM:
/* see comments above. */
if (tm->tm_year > 0)
- result = ((tm->tm_year+999) / 1000);
+ result = ((tm->tm_year + 999) / 1000);
else
- result = - ((999 - (tm->tm_year-1))/1000);
+ result = -((999 - (tm->tm_year - 1)) / 1000);
break;
case DTK_JULIAN:
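/*
 * Standalone sketch of the extraction rules quoted above: century c > 0
 * covers years [(c-1)*100+1 .. c*100], century c < 0 covers
 * [c*100 .. (c+1)*100-1], there is no century 0, and millennia follow the
 * analogous arithmetic from the diff.
 */
#include <stdio.h>

static int
part_century(int year)
{
	if (year > 0)
		return (year + 99) / 100;
	/* caution: C division may have a negative remainder */
	return -((99 - (year - 1)) / 100);
}

static int
part_millennium(int year)
{
	if (year > 0)
		return (year + 999) / 1000;
	return -((999 - (year - 1)) / 1000);
}

int
main(void)
{
	printf("century(2000)    = %d\n", part_century(2000));		/* 20 */
	printf("century(2001)    = %d\n", part_century(2001));		/* 21 */
	printf("century(-1)      = %d\n", part_century(-1));		/* -1, i.e. 1 BC */
	printf("millennium(2000) = %d\n", part_millennium(2000));	/* 2 */
	printf("millennium(2001) = %d\n", part_millennium(2001));	/* 3 */
	return 0;
}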
double dummy;
fsec_t fsec;
char *tzn;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
if (TIMESTAMP_NOT_FINITE(timestamp))
case DTK_DECADE:
/* see comments in timestamp_part */
- if (tm->tm_year>0)
+ if (tm->tm_year > 0)
result = (tm->tm_year / 10);
else
- result = - ((8-(tm->tm_year-1)) / 10);
+ result = -((8 - (tm->tm_year - 1)) / 10);
break;
case DTK_CENTURY:
/* see comments in timestamp_part */
if (tm->tm_year > 0)
- result = ((tm->tm_year+99) / 100);
+ result = ((tm->tm_year + 99) / 100);
else
- result = - ((99 - (tm->tm_year-1))/100);
+ result = -((99 - (tm->tm_year - 1)) / 100);
break;
case DTK_MILLENNIUM:
/* see comments in timestamp_part */
if (tm->tm_year > 0)
- result = ((tm->tm_year+999) / 1000);
+ result = ((tm->tm_year + 999) / 1000);
else
- result = - ((999 - (tm->tm_year-1))/1000);
+ result = -((999 - (tm->tm_year - 1)) / 1000);
break;
case DTK_JULIAN:
val;
char *lowunits;
fsec_t fsec;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
lowunits = downcase_truncate_identifier(VARDATA(units),
timestamp2timestamptz(Timestamp timestamp)
{
TimestampTz result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
int tz;
{
TimestampTz timestamp = PG_GETARG_TIMESTAMPTZ(0);
Timestamp result;
- struct pg_tm tt,
+ struct pg_tm tt,
*tm = &tt;
fsec_t fsec;
char *tzn;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.41 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varbit.c,v 1.42 2004/08/29 05:06:49 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
/* store last fractional byte */
if (destbitsleft > 0)
- {
*r = (bits8) ((a << (8 - destbitsleft)) & BITMASK);
- }
PG_RETURN_VARBIT_P(result);
}
}
/* store last fractional byte */
if (destbitsleft > 0)
- {
*r = (bits8) ((a << (8 - destbitsleft)) & BITMASK);
- }
PG_RETURN_VARBIT_P(result);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.106 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varchar.c,v 1.107 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
for (i = maxmblen - VARHDRSZ; i < len - VARHDRSZ; i++)
if (*(VARDATA(source) + i) != ' ')
ereport(ERROR,
- (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
- errmsg("value too long for type character(%d)",
- maxlen - VARHDRSZ)));
+ (errcode(ERRCODE_STRING_DATA_RIGHT_TRUNCATION),
+ errmsg("value too long for type character(%d)",
+ maxlen - VARHDRSZ)));
}
len = maxmblen;
/* get number of bytes, ignoring trailing spaces */
len = bcTruelen(arg);
-
+
/* in multibyte encoding, convert to number of characters */
if (pg_database_encoding_max_length() != 1)
len = pg_mbstrlen_with_len(VARDATA(arg), len);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.116 2004/08/29 04:12:52 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/adt/varlena.c,v 1.117 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Does the real work for textlen()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed form. We can avoid decompressing it at all
* in some cases.
*/
* Does the real work for text_substr() and text_substr_no_len()
*
* This is broken out so it can be called directly by other string processing
- * functions. Note that the argument is passed as a Datum, to indicate that
+ * functions. Note that the argument is passed as a Datum, to indicate that
* it may still be in compressed/toasted form. We can avoid detoasting all
* of it in some cases.
*/
endp = nextp;
if (curname == nextp)
return false; /* empty unquoted name not allowed */
+
/*
- * Downcase the identifier, using same code as main lexer does.
+ * Downcase the identifier, using same code as main lexer
+ * does.
*
* XXX because we want to overwrite the input in-place, we cannot
* support a downcasing transformation that increases the
{
/* otherwise create array and exit */
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate,
- CurrentMemoryContext));
+ CurrentMemoryContext));
}
}
else if (start_posn == 0)
/* interior field requested */
result_text = text_substring(PointerGetDatum(inputstring),
start_posn + fldsep_len,
- end_posn - start_posn - fldsep_len,
+ end_posn - start_posn - fldsep_len,
false);
}
value = DatumGetCString(FunctionCall3(&my_extra->proc,
itemvalue,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(-1)));
if (i > 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.115 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/catcache.c,v 1.116 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_MINSIZE,
- ALLOCSET_DEFAULT_INITSIZE,
- ALLOCSET_DEFAULT_MAXSIZE);
+ ALLOCSET_DEFAULT_MINSIZE,
+ ALLOCSET_DEFAULT_INITSIZE,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
* the other backends won't see our updated tuples as good.
*
* When a subtransaction aborts, we can process and discard any events
- * it has queued. When a subtransaction commits, we just add its events
+ * it has queued. When a subtransaction commits, we just add its events
* to the pending lists of the parent transaction.
*
* In short, we need to remember until xact end every insert or delete
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.65 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/inval.c,v 1.66 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
InvalidationListHeader PriorCmdInvalidMsgs;
/* init file must be invalidated? */
- bool RelcacheInitFileInval;
+ bool RelcacheInitFileInval;
} TransInvalidationInfo;
static TransInvalidationInfo *transInvalInfo = NULL;
/* relfilenode fields must be checked to support reassignment */
ProcessMessageList(hdr->rclist,
if (msg->rc.relId == relId &&
- RelFileNodeEquals(msg->rc.physId, physId)) return);
+ RelFileNodeEquals(msg->rc.physId, physId)) return);
/* OK, add the item */
msg.rc.id = SHAREDINVALRELCACHE_ID;
else if (msg->id == SHAREDINVALRELCACHE_ID)
{
/*
- * If the message includes a valid relfilenode, we must ensure that
- * smgr cache entry gets zapped. The relcache will handle this if
- * called, otherwise we must do it directly.
+ * If the message includes a valid relfilenode, we must ensure
+ * that smgr cache entry gets zapped. The relcache will handle
+ * this if called, otherwise we must do it directly.
*/
if (msg->rc.dbId == MyDatabaseId || msg->rc.dbId == InvalidOid)
{
Oid tupleRelId;
Oid databaseId;
Oid relationId;
- RelFileNode rnode;
+ RelFileNode rnode;
/* Do nothing during bootstrap */
if (IsBootstrapProcessingMode())
rnode.spcNode = MyDatabaseTableSpace;
rnode.dbNode = databaseId;
rnode.relNode = classtup->relfilenode;
+
/*
- * Note: during a pg_class row update that assigns a new relfilenode
- * or reltablespace value, we will be called on both the old and new
- * tuples, and thus will broadcast invalidation messages showing both
- * the old and new RelFileNode values. This ensures that other
- * backends will close smgr references to the old file.
+ * Note: during a pg_class row update that assigns a new
+ * relfilenode or reltablespace value, we will be called on both
+ * the old and new tuples, and thus will broadcast invalidation
+ * messages showing both the old and new RelFileNode values. This
+ * ensures that other backends will close smgr references to the
+ * old file.
*/
}
else if (tupleRelId == RelOid_pg_attribute)
Form_pg_attribute atttup = (Form_pg_attribute) GETSTRUCT(tuple);
relationId = atttup->attrelid;
+
/*
- * KLUGE ALERT: we always send the relcache event with MyDatabaseId,
- * even if the rel in question is shared (which we can't easily tell).
- * This essentially means that only backends in this same database
- * will react to the relcache flush request. This is in fact
- * appropriate, since only those backends could see our pg_attribute
- * change anyway. It looks a bit ugly though.
+ * KLUGE ALERT: we always send the relcache event with
+ * MyDatabaseId, even if the rel in question is shared (which we
+ * can't easily tell). This essentially means that only backends
+ * in this same database will react to the relcache flush request.
+ * This is in fact appropriate, since only those backends could
+ * see our pg_attribute change anyway. It looks a bit ugly
+ * though.
*/
databaseId = MyDatabaseId;
/* We assume no smgr cache flush is needed, either */
RelationCacheInitFileInvalidate(true);
AppendInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
- &transInvalInfo->CurrentCmdInvalidMsgs);
+ &transInvalInfo->CurrentCmdInvalidMsgs);
ProcessInvalidationMessages(&transInvalInfo->PriorCmdInvalidMsgs,
SendSharedInvalidMessage);
* We can forget about CurrentCmdInvalidMsgs too, since those changes haven't
* touched the caches yet.
*
- * In any case, pop the transaction stack. We need not physically free memory
+ * In any case, pop the transaction stack. We need not physically free memory
* here, since CurTransactionContext is about to be emptied anyway
* (if aborting).
*/
{
/*
* You might think this shouldn't be called outside any transaction,
- * but bootstrap does it, and also ABORT issued when not in a transaction.
- * So just quietly return if no state to work on.
+ * but bootstrap does it, and also ABORT issued when not in a
+ * transaction. So just quietly return if no state to work on.
*/
if (transInvalInfo == NULL)
return;
Form_pg_class classtup = (Form_pg_class) GETSTRUCT(classTuple);
Oid databaseId;
Oid relationId;
- RelFileNode rnode;
+ RelFileNode rnode;
relationId = HeapTupleGetOid(classTuple);
if (classtup->relisshared)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.115 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/lsyscache.c,v 1.116 2004/08/29 05:06:50 momjian Exp $
*
* NOTES
* Eventually, the index information should go through here, too.
/* Convert C string to a value of the given type */
datum = OidFunctionCall3(type->typinput,
CStringGetDatum(strDefaultVal),
- ObjectIdGetDatum(getTypeIOParam(typeTuple)),
+ ObjectIdGetDatum(getTypeIOParam(typeTuple)),
Int32GetDatum(-1));
/* Build a Const node containing the value */
expr = (Node *) makeConst(typid,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.209 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/relcache.c,v 1.210 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
#define RELCACHE_INIT_FILENAME "pg_internal.init"
-#define RELCACHE_INIT_FILEMAGIC 0x573262 /* version ID value */
+#define RELCACHE_INIT_FILEMAGIC 0x573262 /* version ID value */
/*
* hardcoded tuple descriptors. see include/catalog/pg_attribute.h
constr->num_check = relation->rd_rel->relchecks;
constr->check = (ConstrCheck *)
MemoryContextAllocZero(CacheMemoryContext,
- constr->num_check * sizeof(ConstrCheck));
+ constr->num_check * sizeof(ConstrCheck));
CheckConstraintFetch(relation);
}
else
relation->rd_supportinfo = supportinfo;
/*
- * Fill the operator and support procedure OID arrays.
- * (supportinfo is left as zeroes, and is filled on-the-fly when used)
+ * Fill the operator and support procedure OID arrays. (supportinfo is
+ * left as zeroes, and is filled on-the-fly when used)
*/
IndexSupportInitialize(relation->rd_index,
operator, support,
}
/*
- * Scan pg_amproc to obtain support procs for the opclass. We only fetch
- * the default ones (those with subtype zero).
+ * Scan pg_amproc to obtain support procs for the opclass. We only
+ * fetch the default ones (those with subtype zero).
*/
if (numSupport > 0)
{
* because it will never be replaced. The input values must be
* correctly defined by macros in src/include/catalog/ headers.
*
- * Note however that rd_att's tdtypeid, tdtypmod, tdhasoid fields are
- * not right at this point. They will be fixed later when the real
+ * Note however that rd_att's tdtypeid, tdtypmod, tdhasoid fields are not
+ * right at this point. They will be fixed later when the real
* pg_class row is loaded.
*/
relation->rd_att = CreateTemplateTupleDesc(natts, false);
/*
* RelationReloadClassinfo - reload the pg_class row (only)
*
- * This function is used only for nailed indexes. Since a REINDEX can
+ * This function is used only for nailed indexes. Since a REINDEX can
* change the relfilenode value for a nailed index, we have to reread
* the pg_class row anytime we get an SI invalidation on a nailed index
* (without throwing away the whole relcache entry, since we'd be unable
/* Read the pg_class row */
buildinfo.infotype = INFO_RELID;
buildinfo.i.info_id = relation->rd_id;
+
/*
* Don't try to use an indexscan of pg_class_oid_index to reload the
* info for pg_class_oid_index ...
/*
* Never, never ever blow away a nailed-in system relation, because
* we'd be unable to recover. However, we must reset rd_targblock, in
- * case we got called because of a relation cache flush that was triggered
- * by VACUUM.
+ * case we got called because of a relation cache flush that was
+ * triggered by VACUUM.
*
- * If it's a nailed index, then we need to re-read the pg_class row to see
- * if its relfilenode changed. We can't necessarily do that here, because
- * we might be in a failed transaction. We assume it's okay to do it if
- * there are open references to the relcache entry (cf notes for
- * AtEOXact_RelationCache). Otherwise just mark the entry as possibly
- * invalid, and it'll be fixed when next opened.
+ * If it's a nailed index, then we need to re-read the pg_class row to
+ * see if its relfilenode changed. We can't necessarily do that here,
+ * because we might be in a failed transaction. We assume it's okay
+ * to do it if there are open references to the relcache entry (cf
+ * notes for AtEOXact_RelationCache). Otherwise just mark the entry
+ * as possibly invalid, and it'll be fixed when next opened.
*/
if (relation->rd_isnailed)
{
relation->rd_targblock = InvalidBlockNumber;
if (relation->rd_rel->relkind == RELKIND_INDEX)
{
- relation->rd_isvalid = false; /* needs to be revalidated */
+ relation->rd_isvalid = false; /* needs to be revalidated */
if (relation->rd_refcnt > 1)
RelationReloadClassinfo(relation);
}
{
/*
* When rebuilding an open relcache entry, must preserve ref count
- * and rd_createxact state. Also attempt to preserve the tupledesc
- * and rewrite-rule substructures in place.
+ * and rd_createxact state. Also attempt to preserve the
+ * tupledesc and rewrite-rule substructures in place.
*
- * Note that this process does not touch CurrentResourceOwner;
- * which is good because whatever ref counts the entry may have
- * do not necessarily belong to that resource owner.
+ * Note that this process does not touch CurrentResourceOwner; which
+ * is good because whatever ref counts the entry may have do not
+ * necessarily belong to that resource owner.
*/
int old_refcnt = relation->rd_refcnt;
TransactionId old_createxact = relation->rd_createxact;
*
* Ordinarily, if rnode is supplied then it will match the relfilenode of
* the target relid. However, it's possible for rnode to be different if
- * someone is engaged in a relfilenode change. In that case we want to
- * make sure we clear the right cache entries. This has to be done here
+ * someone is engaged in a relfilenode change. In that case we want to
+ * make sure we clear the right cache entries. This has to be done here
* to keep things in sync between relcache and smgr cache --- we can't have
* someone flushing an smgr cache entry that a relcache entry still points
* to.
/*
* RelationCacheInvalidate
* Blow away cached relation descriptors that have zero reference counts,
- * and rebuild those with positive reference counts. Also reset the smgr
+ * and rebuild those with positive reference counts. Also reset the smgr
* relation cache.
*
* This is currently used only to recover from SI message buffer overflow,
/*
* Is it a relation created in the current transaction?
*
- * During commit, reset the flag to zero, since we are now out of
- * the creating transaction. During abort, simply delete the
- * relcache entry --- it isn't interesting any longer. (NOTE: if
- * we have forgotten the new-ness of a new relation due to a
- * forced cache flush, the entry will get deleted anyway by
- * shared-cache-inval processing of the aborted pg_class
- * insertion.)
+ * During commit, reset the flag to zero, since we are now out of the
+ * creating transaction. During abort, simply delete the relcache
+ * entry --- it isn't interesting any longer. (NOTE: if we have
+ * forgotten the new-ness of a new relation due to a forced cache
+ * flush, the entry will get deleted anyway by shared-cache-inval
+ * processing of the aborted pg_class insertion.)
*/
if (TransactionIdIsValid(relation->rd_createxact))
{
static List *
insert_ordered_oid(List *list, Oid datum)
{
- ListCell *prev;
+ ListCell *prev;
/* Does the datum belong at the front? */
if (list == NIL || datum < linitial_oid(list))
prev = list_head(list);
for (;;)
{
- ListCell *curr = lnext(prev);
+ ListCell *curr = lnext(prev);
if (curr == NULL || datum < lfirst_oid(curr))
- break; /* it belongs after 'prev', before 'curr' */
+ break; /* it belongs after 'prev', before 'curr' */
prev = curr;
}
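
insert_ordered_oid above keeps a list of OIDs sorted by scanning for the first element larger than the new datum and splicing in front of it. The same idea on an ordinary singly linked list, as a hedged sketch with made-up types (OidNode, insert_ordered) instead of PostgreSQL's List:

#include <stdio.h>
#include <stdlib.h>

typedef unsigned int Oid;		/* stand-in for PostgreSQL's Oid */

typedef struct OidNode
{
	Oid			val;
	struct OidNode *next;
} OidNode;

/* Insert datum so that the list stays in ascending order. */
static OidNode *
insert_ordered(OidNode *head, Oid datum)
{
	OidNode    *node = malloc(sizeof(OidNode));
	OidNode    *prev;

	node->val = datum;

	/* Does the datum belong at the front? */
	if (head == NULL || datum < head->val)
	{
		node->next = head;
		return node;
	}
	/* No, so find the entry it belongs after. */
	prev = head;
	while (prev->next != NULL && prev->next->val <= datum)
		prev = prev->next;
	node->next = prev->next;
	prev->next = node;
	return head;
}

int
main(void)
{
	OidNode    *list = NULL;
	Oid			input[] = {42, 7, 99, 23};
	int			i;

	for (i = 0; i < 4; i++)
		list = insert_ordered(list, input[i]);
	for (; list != NULL; list = list->next)
		printf("%u ", list->val);
	printf("\n");
	return 0;
}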
* RelationSetIndexList -- externally force the index list contents
*
* This is used to temporarily override what we think the set of valid
- * indexes is. The forcing will be valid only until transaction commit
+ * indexes is. The forcing will be valid only until transaction commit
* or abort.
*
* This should only be applied to nailed relations, because in a non-nailed
/* Okay to replace old list */
list_free(relation->rd_indexlist);
relation->rd_indexlist = indexIds;
- relation->rd_indexvalid = 2; /* mark list as forced */
+ relation->rd_indexvalid = 2; /* mark list as forced */
}
/*
pfree(exprsString);
/*
- * Run the expressions through flatten_andors and eval_const_expressions.
- * This is not just an optimization, but is necessary, because the planner
- * will be comparing them to similarly-processed qual clauses, and may
- * fail to detect valid matches without this.
+ * Run the expressions through flatten_andors and
+ * eval_const_expressions. This is not just an optimization, but is
+ * necessary, because the planner will be comparing them to
+ * similarly-processed qual clauses, and may fail to detect valid
+ * matches without this.
*/
result = (List *) flatten_andors((Node *) result);
pfree(predString);
/*
- * Run the expression through canonicalize_qual and eval_const_expressions.
- * This is not just an optimization, but is necessary, because the planner
- * will be comparing it to similarly-processed qual clauses, and may fail
- * to detect valid matches without this.
+ * Run the expression through canonicalize_qual and
+ * eval_const_expressions. This is not just an optimization, but is
+ * necessary, because the planner will be comparing it to
+ * similarly-processed qual clauses, and may fail to detect valid
+ * matches without this.
*/
result = (List *) canonicalize_qual((Expr *) result);
rel->rd_att = CreateTemplateTupleDesc(relform->relnatts,
relform->relhasoids);
rel->rd_att->tdtypeid = relform->reltype;
- rel->rd_att->tdtypmod = -1; /* unnecessary, but... */
+ rel->rd_att->tdtypmod = -1; /* unnecessary, but... */
/* next read all the attribute tuple form data entries */
has_not_null = false;
/*
* Recompute lock and physical addressing info. This is needed in
- * case the pg_internal.init file was copied from some other database
- * by CREATE DATABASE.
+ * case the pg_internal.init file was copied from some other
+ * database by CREATE DATABASE.
*/
RelationInitLockInfo(rel);
RelationInitPhysicalAddr(rel);
RelationCacheInsert(rels[relno]);
/* also make a list of their OIDs, for RelationIdIsInInitFile */
initFileRelationIds = lcons_oid(RelationGetRelid(rels[relno]),
- initFileRelationIds);
+ initFileRelationIds);
}
pfree(rels);
}
/*
- * Write a magic number to serve as a file version identifier. We can
+ * Write a magic number to serve as a file version identifier. We can
* change the magic number whenever the relcache layout changes.
*/
magic = RELCACHE_INIT_FILEMAGIC;
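
Writing a magic number at the front of the init file lets a later reader reject a file produced by an incompatible cache layout. A hedged, standalone sketch of that convention; the constant and function names here are invented:

#include <stdio.h>
#include <stdbool.h>

/* Bump this whenever the on-disk layout changes (illustrative value). */
#define CACHE_FILE_MAGIC 0x573265

static bool
write_cache_header(FILE *fp)
{
	int			magic = CACHE_FILE_MAGIC;

	return fwrite(&magic, sizeof(magic), 1, fp) == 1;
}

static bool
check_cache_header(FILE *fp)
{
	int			magic;

	if (fread(&magic, sizeof(magic), 1, fp) != 1)
		return false;			/* unreadable or truncated */
	return magic == CACHE_FILE_MAGIC;	/* wrong version => rebuild cache */
}

int
main(void)
{
	FILE	   *fp = tmpfile();

	if (fp == NULL)
		return 1;
	write_cache_header(fp);
	rewind(fp);
	printf("header ok: %d\n", check_cache_header(fp));
	fclose(fp);
	return 0;
}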
/* also make a list of their OIDs, for RelationIdIsInInitFile */
oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
initFileRelationIds = lcons_oid(RelationGetRelid(rel),
- initFileRelationIds);
+ initFileRelationIds);
MemoryContextSwitchTo(oldcxt);
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.94 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/syscache.c,v 1.95 2004/08/29 05:06:50 momjian Exp $
*
* NOTES
* These routines allow the parser/planner/executor to perform
}}
};
-static CatCache *SysCache[lengthof(cacheinfo)];
+static CatCache *SysCache[
+ lengthof(cacheinfo)];
static int SysCacheSize = lengthof(cacheinfo);
static bool CacheInitialized = false;
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.9 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/cache/typcache.c,v 1.10 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Stored record types are remembered in a linear array of TupleDescs,
* which can be indexed quickly with the assigned typmod. There is also
- * a hash table to speed searches for matching TupleDescs. The hash key
+ * a hash table to speed searches for matching TupleDescs. The hash key
* uses just the first N columns' type OIDs, and so we may have multiple
* entries with the same hash key.
*/
typedef struct RecordCacheEntry
{
/* the hash lookup key MUST BE FIRST */
- Oid hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */
+ Oid hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */
/* list of TupleDescs for record types with this hashkey */
List *tupdescs;
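
The RecordCacheEntry above keys its hash table on the type OIDs of the first REC_HASH_KEYS columns, zero-filling unused slots, so several distinct tuple descriptors can land in one bucket and are then compared individually. A self-contained sketch of building and hashing such a fixed-size key; the hash function is a simple mix invented for illustration, not the one the backend uses:

#include <stdio.h>

#define REC_HASH_KEYS 16		/* how many leading columns go into the key */

typedef unsigned int Oid;

/* Build the fixed-size key: first N column type OIDs, zero-filled. */
static void
build_hashkey(Oid *key, const Oid *coltypes, int natts)
{
	int			i;

	for (i = 0; i < REC_HASH_KEYS; i++)
		key[i] = (i < natts) ? coltypes[i] : 0;
}

/* Simple multiplicative hash over the whole fixed-size key. */
static unsigned int
hash_key(const Oid *key)
{
	unsigned int h = 0;
	int			i;

	for (i = 0; i < REC_HASH_KEYS; i++)
		h = h * 31 + key[i];
	return h;
}

int
main(void)
{
	Oid			cols[] = {23, 25, 16};	/* e.g. int4, text, bool */
	Oid			key[REC_HASH_KEYS];

	build_hashkey(key, cols, 3);
	printf("bucket hash = %u\n", hash_key(key));
	return 0;
}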
static int32 NextRecordTypmod = 0; /* number of entries used */
-static Oid lookup_default_opclass(Oid type_id, Oid am_id);
+static Oid lookup_default_opclass(Oid type_id, Oid am_id);
/*
else
{
/*
- * If we find a btree opclass where previously we only found
- * a hash opclass, forget the hash equality operator so we
- * can use the btree operator instead.
+ * If we find a btree opclass where previously we only found a
+ * hash opclass, forget the hash equality operator so we can
+ * use the btree operator instead.
*/
typentry->eq_opr = InvalidOid;
typentry->eq_opr_finfo.fn_oid = InvalidOid;
if (typentry->btree_opc != InvalidOid)
typentry->gt_opr = get_opclass_member(typentry->btree_opc,
InvalidOid,
- BTGreaterStrategyNumber);
+ BTGreaterStrategyNumber);
}
if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
typentry->cmp_proc == InvalidOid)
* Set up fmgr lookup info as requested
*
* Note: we tell fmgr the finfo structures live in CacheMemoryContext,
- * which is not quite right (they're really in DynaHashContext) but this
- * will do for our purposes.
+ * which is not quite right (they're really in DynaHashContext) but
+ * this will do for our purposes.
*/
if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
typentry->eq_opr_finfo.fn_oid == InvalidOid &&
typentry->eq_opr != InvalidOid)
{
- Oid eq_opr_func;
+ Oid eq_opr_func;
eq_opr_func = get_opcode(typentry->eq_opr);
if (eq_opr_func != InvalidOid)
{
Relation rel;
- if (!OidIsValid(typentry->typrelid)) /* should not happen */
+ if (!OidIsValid(typentry->typrelid)) /* should not happen */
elog(ERROR, "invalid typrelid for composite type %u",
typentry->type_id);
rel = relation_open(typentry->typrelid, AccessShareLock);
Assert(rel->rd_rel->reltype == typentry->type_id);
+
/*
* Notice that we simply store a link to the relcache's tupdesc.
* Since we are relying on relcache to detect cache flush events,
* lookup_default_opclass
*
* Given the OIDs of a datatype and an access method, find the default
- * operator class, if any. Returns InvalidOid if there is none.
+ * operator class, if any. Returns InvalidOid if there is none.
*/
static Oid
lookup_default_opclass(Oid type_id, Oid am_id)
* than one exact match, then someone put bogus entries in pg_opclass.
*
* This is the same logic as GetDefaultOpClass() in indexcmds.c, except
- * that we consider all opclasses, regardless of the current search path.
+ * that we consider all opclasses, regardless of the current search
+ * path.
*/
rel = heap_openr(OperatorClassRelationName, AccessShareLock);
}
else if (NextRecordTypmod >= RecordCacheArrayLen)
{
- int32 newlen = RecordCacheArrayLen * 2;
+ int32 newlen = RecordCacheArrayLen * 2;
RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
- newlen * sizeof(TupleDesc));
+ newlen * sizeof(TupleDesc));
RecordCacheArrayLen = newlen;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.28 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/assert.c,v 1.29 2004/08/29 05:06:50 momjian Exp $
*
* NOTE
* This should eventually work with elog()
else
{
write_stderr("TRAP: %s(\"%s\", File: \"%s\", Line: %d)\n",
- errorType, conditionName,
- fileName, lineNumber);
+ errorType, conditionName,
+ fileName, lineNumber);
}
#ifdef SLEEP_ON_ASSERT
+
/*
- * It would be nice to use pg_usleep() here, but only does 2000 sec
- * or 33 minutes, which seems too short.
- */
- sleep(1000000);
+ * It would be nice to use pg_usleep() here, but it only does 2000 sec
+ * or 33 minutes, which seems too short.
+ */
+ sleep(1000000);
#endif
abort();
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.147 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/error/elog.c,v 1.148 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* GUC parameters */
PGErrorVerbosity Log_error_verbosity = PGERROR_VERBOSE;
-char *Log_line_prefix = NULL; /* format for extra log line info */
+char *Log_line_prefix = NULL; /* format for extra log line info */
int Log_destination = LOG_DESTINATION_STDERR;
#ifdef HAVE_SYSLOG
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
- errordata_stack_depth = -1; /* make room on stack */
+ errordata_stack_depth = -1; /* make room on stack */
ereport(PANIC, (errmsg_internal("ERRORDATA_STACK_SIZE exceeded")));
}
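
The guard above resets the stack pointer before PANICing so the PANIC itself still has room to be reported; otherwise an error raised while handling an error could recurse without bound. A small standalone sketch of a bounded error stack with the same "make room, then bail out hard" behaviour (push_error and the stack layout are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

#define ERRORDATA_STACK_SIZE 5

typedef struct ErrorData
{
	const char *message;
} ErrorData;

static ErrorData errordata[ERRORDATA_STACK_SIZE];
static int	errordata_stack_depth = -1; /* -1 means empty */

static void
push_error(const char *msg)
{
	if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
	{
		/*
		 * Stack not big enough: this suggests an infinite loop of errors
		 * during error recovery, so reset the stack (to leave room for the
		 * final report) and abort rather than keep recursing.
		 */
		errordata_stack_depth = -1;
		fprintf(stderr, "PANIC: error stack exceeded\n");
		abort();
	}
	errordata[errordata_stack_depth].message = msg;
}

int
main(void)
{
	push_error("first error");
	push_error("error while handling the first error");
	printf("depth is now %d\n", errordata_stack_depth);
	return 0;
}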
/*
* Check some other reasons for treating ERROR as FATAL:
*
- * 1. we have no handler to pass the error to (implies we are in
- * the postmaster or in backend startup).
+ * 1. we have no handler to pass the error to (implies we are in the
+ * postmaster or in backend startup).
*
* 2. ExitOnAnyError mode switch is set (initdb uses this).
*
ImmediateInterruptOK = false;
/*
- * Reset InterruptHoldoffCount in case we ereport'd from inside an
- * interrupt holdoff section. (We assume here that no handler
- * will itself be inside a holdoff section. If necessary, such
- * a handler could save and restore InterruptHoldoffCount for
- * itself, but this should make life easier for most.)
+ * Reset InterruptHoldoffCount in case we ereport'd from
+ * inside an interrupt holdoff section. (We assume here that
+ * no handler will itself be inside a holdoff section. If
+ * necessary, such a handler could save and restore
+ * InterruptHoldoffCount for itself, but this should make life
+ * easier for most.)
*/
InterruptHoldoffCount = 0;
- CritSectionCount = 0; /* should be unnecessary, but... */
+ CritSectionCount = 0; /* should be unnecessary, but... */
/*
- * Note that we leave CurrentMemoryContext set to ErrorContext.
- * The handler should reset it to something else soon.
+ * Note that we leave CurrentMemoryContext set to
+ * ErrorContext. The handler should reset it to something else
+ * soon.
*/
recursion_depth--;
* If we are doing FATAL or PANIC, abort any old-style COPY OUT in
* progress, so that we can report the message before dying. (Without
* this, pq_putmessage will refuse to send the message at all, which
- * is what we want for NOTICE messages, but not for fatal exits.)
- * This hack is necessary because of poor design of old-style copy
+ * is what we want for NOTICE messages, but not for fatal exits.) This
+ * hack is necessary because of poor design of old-style copy
* protocol. Note we must do this even if client is fool enough to
* have set client_min_messages above FATAL, so don't look at
* output_to_client.
whereToSendOutput = None;
/*
- * fflush here is just to improve the odds that we get to see
- * the error message, in case things are so hosed that
- * proc_exit crashes. Any other code you might be tempted to
- * add here should probably be in an on_proc_exit callback
- * instead.
+ * fflush here is just to improve the odds that we get to see the
+ * error message, in case things are so hosed that proc_exit
+ * crashes. Any other code you might be tempted to add here
+ * should probably be in an on_proc_exit callback instead.
*/
fflush(stdout);
fflush(stderr);
/*
- * If proc_exit is already running, we exit with nonzero exit code to
- * indicate that something's pretty wrong. We also want to exit with
- * nonzero exit code if not running under the postmaster (for example,
- * if we are being run from the initdb script, we'd better return an
- * error status).
+ * If proc_exit is already running, we exit with nonzero exit code
+ * to indicate that something's pretty wrong. We also want to
+ * exit with nonzero exit code if not running under the postmaster
+ * (for example, if we are being run from the initdb script, we'd
+ * better return an error status).
*/
proc_exit(proc_exit_inprogress || !IsUnderPostmaster);
}
/* Wrong object type or state */
case ENOTDIR: /* Not a directory */
case EISDIR: /* Is a directory */
-#if defined(ENOTEMPTY) && (ENOTEMPTY != EEXIST) /* same code on AIX */
- case ENOTEMPTY: /* Directory not empty */
+#if defined(ENOTEMPTY) && (ENOTEMPTY != EEXIST) /* same code on AIX */
+ case ENOTEMPTY: /* Directory not empty */
#endif
edata->sqlerrcode = ERRCODE_WRONG_OBJECT_TYPE;
break;
/*
* CopyErrorData --- obtain a copy of the topmost error stack entry
*
- * This is only for use in error handler code. The data is copied into the
+ * This is only for use in error handler code. The data is copied into the
* current memory context, so callers should always switch away from
* ErrorContext first; otherwise it will be lost when FlushErrorState is done.
*/
/*
* Reset stack to empty. The only case where it would be more than
* one deep is if we serviced an error that interrupted construction
- * of another message. We assume control escaped out of that
- * message construction and won't ever go back.
+ * of another message. We assume control escaped out of that message
+ * construction and won't ever go back.
*/
errordata_stack_depth = -1;
recursion_depth = 0;
*
* A handler can do CopyErrorData/FlushErrorState to get out of the error
* subsystem, then do some processing, and finally ReThrowError to re-throw
- * the original error. This is slower than just PG_RE_THROW() but should
+ * the original error. This is slower than just PG_RE_THROW() but should
* be used if the "some processing" is likely to incur another error.
*/
void
if (++errordata_stack_depth >= ERRORDATA_STACK_SIZE)
{
/*
- * Wups, stack not big enough. We treat this as a PANIC condition
+ * Wups, stack not big enough. We treat this as a PANIC condition
* because it suggests an infinite loop of errors during error
* recovery.
*/
- errordata_stack_depth = -1; /* make room on stack */
+ errordata_stack_depth = -1; /* make room on stack */
ereport(PANIC, (errmsg_internal("ERRORDATA_STACK_SIZE exceeded")));
}
0666)) < 0)
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", OutputFileName)));
+ errmsg("could not open file \"%s\": %m", OutputFileName)));
istty = isatty(fd);
close(fd);
if (!freopen(OutputFileName, "a", stdout))
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not reopen file \"%s\" as stdout: %m",
- OutputFileName)));
+ errmsg("could not reopen file \"%s\" as stdout: %m",
+ OutputFileName)));
}
}
write_eventlog(int level, const char *line)
{
static HANDLE evtHandle = INVALID_HANDLE_VALUE;
-
- if (evtHandle == INVALID_HANDLE_VALUE) {
- evtHandle = RegisterEventSource(NULL,"PostgreSQL");
- if (evtHandle == NULL) {
+
+ if (evtHandle == INVALID_HANDLE_VALUE)
+ {
+ evtHandle = RegisterEventSource(NULL, "PostgreSQL");
+ if (evtHandle == NULL)
+ {
evtHandle = INVALID_HANDLE_VALUE;
return;
}
ReportEvent(evtHandle,
level,
0,
- 0, /* All events are Id 0 */
+ 0, /* All events are Id 0 */
NULL,
1,
0,
&line,
NULL);
}
-#endif /* WIN32*/
+#endif /* WIN32 */
/*
* Format tag info for log lines; append to the provided buffer.
{
/* static counter for line numbers */
static long log_line_number = 0;
+
/* has counter been reset in current process? */
static int log_my_pid = 0;
- int format_len;
- int i;
+ int format_len;
+ int i;
/*
* This is one of the few places where we'd rather not inherit a
appendStringInfo(buf, "%s", username);
}
break;
- case 'd':
+ case 'd':
if (MyProcPort)
{
const char *dbname = MyProcPort->database_name;
if (MyProcPort)
{
appendStringInfo(buf, "%lx.%lx",
- (long)(MyProcPort->session_start.tv_sec),
- (long)MyProcPid);
+ (long) (MyProcPort->session_start.tv_sec),
+ (long) MyProcPid);
}
break;
case 'p':
- appendStringInfo(buf, "%ld", (long)MyProcPid);
+ appendStringInfo(buf, "%ld", (long) MyProcPid);
break;
case 'l':
appendStringInfo(buf, "%ld", log_line_number);
case 't':
{
/*
- * Note: for %t and %s we deliberately use the C library's
- * strftime/localtime, and not the equivalent functions
- * from src/timezone. This ensures that all backends
- * will report log entries in the same timezone, namely
- * whatever C-library setting they inherit from the
- * postmaster. If we used src/timezone then local
- * settings of the TimeZone GUC variable would confuse
- * the log.
+ * Note: for %t and %s we deliberately use the C
+ * library's strftime/localtime, and not the
+ * equivalent functions from src/timezone. This
+ * ensures that all backends will report log entries
+ * in the same timezone, namely whatever C-library
+ * setting they inherit from the postmaster. If we
+ * used src/timezone then local settings of the
+ * TimeZone GUC variable would confuse the log.
*/
- time_t stamp_time = time(NULL);
- char strfbuf[128];
+ time_t stamp_time = time(NULL);
+ char strfbuf[128];
strftime(strfbuf, sizeof(strfbuf),
"%Y-%m-%d %H:%M:%S %Z",
case 's':
if (MyProcPort)
{
- time_t stamp_time = MyProcPort->session_start.tv_sec;
- char strfbuf[128];
+ time_t stamp_time = MyProcPort->session_start.tv_sec;
+ char strfbuf[128];
strftime(strfbuf, sizeof(strfbuf),
"%Y-%m-%d %H:%M:%S %Z",
break;
case 'i':
if (MyProcPort)
- {
appendStringInfo(buf, "%s", MyProcPort->commandTag);
- }
break;
case 'r':
if (MyProcPort)
MyProcPort->remote_port);
}
break;
- case 'x':
+ case 'x':
/* in postmaster and friends, stop if %x is seen */
/* in a backend, just ignore */
if (MyProcPort == NULL)
i = format_len;
break;
- case '%':
+ case '%':
appendStringInfoChar(buf, '%');
break;
default:
}
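
log_line_prefix is expanded by walking the format string and replacing escapes such as %p (process ID), %t (a timestamp produced with the C library's strftime/localtime, so every backend reports in the timezone inherited from the postmaster) and %% (a literal percent sign). A condensed, hedged sketch of that escape loop, writing into a plain char buffer rather than a StringInfo and covering only those three escapes; getpid() from POSIX stands in for MyProcPid:

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

/* Append s to buf, never writing past buflen; returns the new used count. */
static size_t
append_str(char *buf, size_t buflen, size_t used, const char *s)
{
	int			n = snprintf(buf + used, buflen - used, "%s", s);

	if (n > 0)
		used += (size_t) n;
	if (used >= buflen)
		used = buflen - 1;		/* output was truncated */
	return used;
}

/* Expand a log_line_prefix-like format string (illustrative subset only). */
static void
format_log_prefix(char *buf, size_t buflen, const char *format)
{
	size_t		used = 0;
	size_t		i;
	size_t		format_len = strlen(format);

	buf[0] = '\0';
	for (i = 0; i < format_len; i++)
	{
		char		piece[128];

		if (format[i] != '%')
		{
			piece[0] = format[i];
			piece[1] = '\0';
			used = append_str(buf, buflen, used, piece);
			continue;
		}
		if (++i >= format_len)
			break;				/* ignore a trailing lone '%' */
		switch (format[i])
		{
			case 'p':			/* process ID */
				snprintf(piece, sizeof(piece), "%ld", (long) getpid());
				used = append_str(buf, buflen, used, piece);
				break;
			case 't':			/* timestamp via the C library's localtime */
				{
					time_t		stamp_time = time(NULL);

					strftime(piece, sizeof(piece),
							 "%Y-%m-%d %H:%M:%S %Z",
							 localtime(&stamp_time));
					used = append_str(buf, buflen, used, piece);
				}
				break;
			case '%':			/* literal percent sign */
				used = append_str(buf, buflen, used, "%");
				break;
			default:
				break;			/* unknown escape: ignore it */
		}
	}
}

int
main(void)
{
	char		prefix[256];

	format_log_prefix(prefix, sizeof(prefix), "[%t] pid=%p %% ");
	printf("%sLOG:  example message\n", prefix);
	return 0;
}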
/*
- * If the user wants the query that generated this error logged, do it.
+ * If the user wants the query that generated this error logged, do
+ * it.
*/
if (edata->elevel >= log_min_error_statement && debug_query_string != NULL)
{
#ifdef WIN32
if (Log_destination & LOG_DESTINATION_EVENTLOG)
{
- int eventlog_level;
- switch (edata->elevel)
+ int eventlog_level;
+
+ switch (edata->elevel)
{
case DEBUG5:
case DEBUG4:
#endif /* WIN32 */
/* Write to stderr, if enabled */
if ((Log_destination & LOG_DESTINATION_STDERR) || whereToSendOutput == Debug)
- {
fprintf(stderr, "%s", buf.data);
- }
/* If in the syslogger process, try to write messages direct to file */
if (am_syslogger)
static void
append_with_tabs(StringInfo buf, const char *str)
{
- char ch;
+ char ch;
while ((ch = *str++) != '\0')
{
}
-/*
+/*
* Write errors to stderr (or by equal means when stderr is
* not available). Used before ereport/elog can be used
- * safely (memory context, GUC load etc)
+ * safely (memory context, GUC load etc)
*/
void
write_stderr(const char *fmt,...)
{
- va_list ap;
+ va_list ap;
fmt = gettext(fmt);
/* On Unix, we just fprintf to stderr */
vfprintf(stderr, fmt, ap);
#else
- /* On Win32, we print to stderr if running on a console, or write to
- * eventlog if running as a service */
- if (pgwin32_is_service()) /* Running as a service */
+
+ /*
+ * On Win32, we print to stderr if running on a console, or write to
+ * eventlog if running as a service
+ */
+ if (pgwin32_is_service()) /* Running as a service */
{
- char errbuf[2048]; /* Arbitrary size? */
+ char errbuf[2048]; /* Arbitrary size? */
vsnprintf(errbuf, sizeof(errbuf), fmt, ap);
-
+
write_eventlog(EVENTLOG_ERROR_TYPE, errbuf);
}
- else /* Not running as service, write to stderr */
+ else
+/* Not running as service, write to stderr */
vfprintf(stderr, fmt, ap);
#endif
va_end(ap);
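
write_stderr takes printf-style arguments and, on Unix, simply vfprintf's them to stderr; the Win32 branch instead routes the formatted text to the event log when running as a service. A portable, hedged sketch of just the variadic plumbing, without the Win32 event-log path:

#include <stdarg.h>
#include <stdio.h>

/*
 * Minimal analogue of write_stderr(): format the message and send it to
 * stderr.  A fuller implementation might translate fmt first and, on
 * Windows, redirect to the event log when no console is attached.
 */
static void
write_stderr_sketch(const char *fmt,...)
{
	va_list		ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
	fflush(stderr);
}

int
main(void)
{
	write_stderr_sketch("could not open file \"%s\": error %d\n",
						"postgresql.conf", 2);
	return 0;
}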
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.77 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/dfmgr.c,v 1.78 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
struct df_files *next; /* List link */
dev_t device; /* Device file is on */
-#ifndef WIN32 /* ensures we never again depend on this under win32 */
+#ifndef WIN32 /* ensures we never again depend on this
+ * under win32 */
ino_t inode; /* Inode number of file */
#endif
void *handle; /* a handle for pg_dl* functions */
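
The df_files fields above identify an already-loaded shared library by device and inode rather than by pathname, so the same file reached through two different paths is not loaded twice (the inode test is compiled out on Windows). A hedged, Unix-only sketch of that check using stat(2); same_file is an invented helper, not a backend function:

#include <stdio.h>
#include <stdbool.h>
#include <sys/stat.h>

/* Return true if the two paths refer to the same underlying file. */
static bool
same_file(const char *path1, const char *path2)
{
	struct stat st1,
				st2;

	if (stat(path1, &st1) != 0 || stat(path2, &st2) != 0)
		return false;			/* treat unreadable paths as distinct */
	return st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino;
}

int
main(void)
{
	printf("same file: %d\n", same_file("/etc/hosts", "/etc/./hosts"));
	return 0;
}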
errmsg("could not access file \"%s\": %m", fullname)));
/*
- * We have to zap all entries in the list that match on either filename
- * or inode, else load_external_function() won't do anything.
+ * We have to zap all entries in the list that match on either
+ * filename or inode, else load_external_function() won't do anything.
*/
prv = NULL;
for (file_scanner = file_list; file_scanner != NULL; file_scanner = nxt)
{
const char *sep_ptr;
char *ret;
-
+
AssertArg(name != NULL);
if (name[0] != '$')
if ((sep_ptr = first_dir_separator(name)) == NULL)
sep_ptr = name + strlen(name);
-
+
if (strlen("$libdir") != sep_ptr - name ||
strncmp(name, "$libdir", strlen("$libdir")) != 0)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("invalid macro name in dynamic library path: %s", name)));
+ errmsg("invalid macro name in dynamic library path: %s", name)));
ret = palloc(strlen(pkglib_path) + strlen(sep_ptr) + 1);
char *full;
piece = first_path_separator(p);
- if(piece == p)
+ if (piece == p)
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("zero-length component in parameter \"dynamic_library_path\"")));
+ errmsg("zero-length component in parameter \"dynamic_library_path\"")));
- if(piece == 0)
- len = strlen(p);
+ if (piece == 0)
+ len = strlen(p);
else
- len = piece - p;
+ len = piece - p;
piece = palloc(len + 1);
strncpy(piece, p, len);
if (!is_absolute_path(mangled))
ereport(ERROR,
(errcode(ERRCODE_INVALID_NAME),
- errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
+ errmsg("component in parameter \"dynamic_library_path\" is not an absolute path")));
full = palloc(strlen(mangled) + 1 + baselen + 1);
sprintf(full, "%s/%s", mangled, basename);
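
The loop above walks dynamic_library_path one component at a time: it rejects empty components and relative components, then glues each directory to the requested basename. A hedged standalone sketch of that splitting and joining, using ':' as the separator and plain malloc in place of palloc; expand_search_path is an invented name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * For each component of a colon-separated search path, print the candidate
 * full file name "<component>/<basename>".  Empty or relative components are
 * reported as errors, loosely mirroring the checks in the hunk above.
 */
static void
expand_search_path(const char *path, const char *basename)
{
	const char *p = path;

	for (;;)
	{
		const char *piece = strchr(p, ':');
		size_t		len = piece ? (size_t) (piece - p) : strlen(p);
		char	   *dir;

		if (len == 0)
		{
			fprintf(stderr, "zero-length component in search path\n");
			return;
		}
		dir = malloc(len + 1);
		memcpy(dir, p, len);
		dir[len] = '\0';

		if (dir[0] != '/')
			fprintf(stderr, "component \"%s\" is not an absolute path\n", dir);
		else
			printf("candidate: %s/%s\n", dir, basename);
		free(dir);

		if (piece == NULL)
			break;				/* that was the last component */
		p = piece + 1;
	}
}

int
main(void)
{
	expand_search_path("/usr/local/pgsql/lib:/opt/pg/lib", "plpgsql.so");
	return 0;
}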
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.16 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/fmgr/funcapi.c,v 1.17 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* First call
*/
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
/*
* Allocate suitably long-lived space and zero it
* at the beginning of each call, the Slot will hold a dangling
* pointer to an already-recycled tuple. We clear it out here.
*
- * Note: use of retval->slot is obsolete as of 8.0, and we expect that
- * it will always be NULL. This is just here for backwards compatibility
+ * Note: use of retval->slot is obsolete as of 8.0, and we expect that it
+ * will always be NULL. This is just here for backwards compatibility
* in case someone creates a slot anyway.
*/
if (retval->slot != NULL)
void
end_MultiFuncCall(PG_FUNCTION_ARGS, FuncCallContext *funcctx)
{
- ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsi = (ReturnSetInfo *) fcinfo->resultinfo;
/* Deregister the shutdown callback */
UnregisterExprContextCallback(rsi->econtext,
static void
shutdown_MultiFuncCall(Datum arg)
{
- FmgrInfo *flinfo = (FmgrInfo *) DatumGetPointer(arg);
+ FmgrInfo *flinfo = (FmgrInfo *) DatumGetPointer(arg);
FuncCallContext *funcctx = (FuncCallContext *) flinfo->fn_extra;
/* unbind from flinfo */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.52 2004/08/29 04:12:53 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/hash/dynahash.c,v 1.53 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
#define MEM_ALLOC DynaHashAlloc
-#undef MEM_FREE /* already in windows header files */
+#undef MEM_FREE /* already in windows header files */
#define MEM_FREE pfree
/*
* If you don't specify a match function, it defaults to strncmp() if
* you used string_hash (either explicitly or by default) and to
- * memcmp() otherwise. (Prior to PostgreSQL 7.4, memcmp() was always
+ * memcmp() otherwise. (Prior to PostgreSQL 7.4, memcmp() was always
* used.)
*/
if (flags & HASH_COMPARE)
/*
* Relocate records to the new bucket. NOTE: because of the way the
- * hash masking is done in calc_bucket, only one old bucket can need to
- * be split at this point. With a different way of reducing the hash
- * value, that might not be true!
+ * hash masking is done in calc_bucket, only one old bucket can need
+ * to be split at this point. With a different way of reducing the
+ * hash value, that might not be true!
*/
old_segnum = old_bucket >> hctl->sshift;
old_segndx = MOD(old_bucket, hctl->ssize);
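
The remark that "only one old bucket can need to be split" follows from how linear hashing maps a hash value to a bucket: the value is masked with the current high mask and, if that lands beyond max_bucket (a bucket that does not exist yet), re-masked with the low mask, which folds it back onto exactly one existing bucket. A hedged sketch of that bucket calculation; the field names echo the usual hash-header fields, but the struct here is invented:

#include <stdio.h>

typedef unsigned int uint32;

/* The handful of header fields the bucket calculation needs. */
typedef struct HashState
{
	uint32		max_bucket;		/* highest bucket that currently exists */
	uint32		high_mask;		/* mask for the next larger table size */
	uint32		low_mask;		/* mask for the current table size */
} HashState;

/* Classic linear-hashing bucket selection. */
static uint32
calc_bucket(const HashState *h, uint32 hash_val)
{
	uint32		bucket = hash_val & h->high_mask;

	if (bucket > h->max_bucket)
		bucket = bucket & h->low_mask;	/* fold back onto an existing bucket */
	return bucket;
}

int
main(void)
{
	/* 10 buckets exist (0..9); the table is growing toward 16. */
	HashState	h = {9, 15, 7};
	uint32		v;

	for (v = 0; v < 20; v++)
		printf("hash %2u -> bucket %u\n", v, calc_bucket(&h, v));
	return 0;
}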
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.93 2004/08/29 04:12:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/globals.c,v 1.94 2004/08/29 05:06:50 momjian Exp $
*
* NOTES
* Globals used all over the place should be declared here and not
char pkglib_path[MAXPGPATH]; /* full path to lib directory */
#ifdef EXEC_BACKEND
-char postgres_exec_path[MAXPGPATH]; /* full path to backend */
+char postgres_exec_path[MAXPGPATH]; /* full path to backend */
+
/* note: currently this is not valid in backend processes */
#endif
int NBuffers = 1000;
int MaxBackends = 100;
-int VacuumCostPageHit = 1; /* GUC parameters for vacuum */
+int VacuumCostPageHit = 1; /* GUC parameters for vacuum */
int VacuumCostPageMiss = 10;
int VacuumCostPageDirty = 20;
int VacuumCostLimit = 200;
int VacuumCostDelay = 0;
-int VacuumCostBalance = 0; /* working state for vacuum */
+int VacuumCostBalance = 0; /* working state for vacuum */
bool VacuumCostActive = false;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.131 2004/08/29 04:12:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/miscinit.c,v 1.132 2004/08/29 05:06:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* NOTE: "ignoring system indexes" means we do not use the system indexes
* for lookups (either in hardwired catalog accesses or in planner-generated
- * plans). We do, however, still update the indexes when a catalog
+ * plans). We do, however, still update the indexes when a catalog
* modification is made.
* ----------------------------------------------------------------
*/
/*
* ResetReindexProcessing
- * Unset reindexing status.
+ * Unset reindexing status.
*/
void
ResetReindexProcessing(void)
errmsg("lock file \"%s\" already exists",
filename),
isDDLock ?
- errhint("Is another %s (PID %d) running in data directory \"%s\"?",
+ errhint("Is another %s (PID %d) running in data directory \"%s\"?",
(encoded_pid < 0 ? "postgres" : "postmaster"),
- (int) other_pid, refName) :
+ (int) other_pid, refName) :
errhint("Is another %s (PID %d) using socket file \"%s\"?",
(encoded_pid < 0 ? "postgres" : "postmaster"),
(int) other_pid, refName)));
"(key %lu, ID %lu) is still in use",
id1, id2),
errhint("If you're sure there are no old "
- "server processes still running, remove "
+ "server processes still running, remove "
"the shared memory block with "
"the command \"ipcrm\", or just delete the file \"%s\".",
filename)));
errmsg("could not remove old lock file \"%s\": %m",
filename),
errhint("The file seems accidentally left over, but "
- "it could not be removed. Please remove the file "
+ "it could not be removed. Please remove the file "
"by hand and try again.")));
}
else
ereport(FATAL,
(errcode_for_file_access(),
- errmsg("could not open file \"%s\": %m", full_path)));
+ errmsg("could not open file \"%s\": %m", full_path)));
}
ret = fscanf(file, "%ld.%ld", &file_major, &file_minor);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.136 2004/08/29 04:12:54 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/init/postinit.c,v 1.137 2004/08/29 05:06:51 momjian Exp $
*
*
*-------------------------------------------------------------------------
AmiTransactionOverride(bootstrap);
/*
- * Initialize local process's access to XLOG. In bootstrap case
- * we may skip this since StartupXLOG() was run instead.
+ * Initialize local process's access to XLOG. In bootstrap case we
+ * may skip this since StartupXLOG() was run instead.
*/
if (!bootstrap)
InitXLOGAccess();
* (currently mule internal code (mic) is used)
* Tatsuo Ishii
*
- * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.46 2004/03/15 10:41:25 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/mbutils.c,v 1.47 2004/08/29 05:06:51 momjian Exp $
*/
#include "postgres.h"
{
ereport(LOG,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("default conversion function for encoding \"%s\" to \"%s\" does not exist",
- pg_encoding_to_char(src_encoding),
- pg_encoding_to_char(dest_encoding))));
+ errmsg("default conversion function for encoding \"%s\" to \"%s\" does not exist",
+ pg_encoding_to_char(src_encoding),
+ pg_encoding_to_char(dest_encoding))));
return src;
}
/*
* conversion functions between pg_wchar and multibyte streams.
* Tatsuo Ishii
- * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.36 2004/03/15 10:41:25 ishii Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mb/wchar.c,v 1.37 2004/08/29 05:06:51 momjian Exp $
*
* WIN1250 client encoding updated by Pavel Behal
*
static int
pg_utf_dsplen(const unsigned char *s)
{
- return 1; /* XXX fix me! */
+ return 1; /* XXX fix me! */
}
/*
static int
pg_mule_dsplen(const unsigned char *s)
{
- return 1; /* XXX fix me! */
+ return 1; /* XXX fix me! */
}
/*
len = 1;
}
else
- {
len = 2;
- }
return (len);
}
{pg_johab2wchar_with_len, pg_johab_mblen, pg_johab_dsplen, 3}, /* 5; PG_JOHAB */
{pg_utf2wchar_with_len, pg_utf_mblen, pg_utf_dsplen, 3}, /* 6; PG_UNICODE */
{pg_mule2wchar_with_len, pg_mule_mblen, pg_mule_dsplen, 3}, /* 7; PG_MULE_INTERNAL */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 8; PG_LATIN1 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 9; PG_LATIN2 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 10; PG_LATIN3 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 11; PG_LATIN4 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 12; PG_LATIN5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 13; PG_LATIN6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 14; PG_LATIN7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 15; PG_LATIN8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 16; PG_LATIN9 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 17; PG_LATIN10 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 18; PG_WIN1256 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 19; PG_TCVN */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 20; PG_WIN874 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 21; PG_KOI8 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 22; PG_WIN1251 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 23; PG_ALT */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 24; ISO-8859-5 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 25; ISO-8859-6 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 26; ISO-8859-7 */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 27; ISO-8859-8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 8; PG_LATIN1 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 9; PG_LATIN2 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 10; PG_LATIN3 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 11; PG_LATIN4 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 12; PG_LATIN5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 13; PG_LATIN6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 14; PG_LATIN7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 15; PG_LATIN8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 16; PG_LATIN9 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 17; PG_LATIN10 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 18; PG_WIN1256 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 19; PG_TCVN */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 20; PG_WIN874 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 21; PG_KOI8 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 22; PG_WIN1251 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 23; PG_ALT */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 24; ISO-8859-5 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 25; ISO-8859-6 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 26; ISO-8859-7 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 27; ISO-8859-8 */
{0, pg_sjis_mblen, pg_sjis_dsplen, 2}, /* 28; PG_SJIS */
- {0, pg_big5_mblen, pg_big5_dsplen,2}, /* 29; PG_BIG5 */
+ {0, pg_big5_mblen, pg_big5_dsplen, 2}, /* 29; PG_BIG5 */
{0, pg_gbk_mblen, pg_gbk_dsplen, 2}, /* 30; PG_GBK */
{0, pg_uhc_mblen, pg_uhc_dsplen, 2}, /* 31; PG_UHC */
- {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 32; PG_WIN1250 */
- {0, pg_gb18030_mblen, pg_gb18030_dsplen, 2} /* 33; PG_GB18030 */
+ {pg_latin12wchar_with_len, pg_latin1_mblen, pg_latin1_dsplen, 1}, /* 32; PG_WIN1250 */
+ {0, pg_gb18030_mblen, pg_gb18030_dsplen, 2} /* 33; PG_GB18030 */
};
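
The table above is indexed by encoding ID and supplies, per encoding, the conversion, byte-length and display-width routines plus the maximum character length; callers dispatch through it instead of switching on the encoding everywhere. A toy, hedged version of that dispatch pattern with two invented single- and double-byte "encodings" (none of these names exist in the backend):

#include <stdio.h>

/* Per-encoding operations, in the spirit of the table above. */
typedef struct WcharTableEntry
{
	int			(*mblen) (const unsigned char *s);	/* bytes in next char */
	int			(*dsplen) (const unsigned char *s); /* display width */
	int			maxmblen;		/* maximum character length in bytes */
} WcharTableEntry;

static int
single_byte_len(const unsigned char *s)
{
	(void) s;
	return 1;
}

static int
two_byte_len(const unsigned char *s)
{
	/* bytes with the high bit set start a 2-byte character here */
	return (*s & 0x80) ? 2 : 1;
}

static const WcharTableEntry wchar_table[] = {
	{single_byte_len, single_byte_len, 1},	/* 0: a LATIN1-like encoding */
	{two_byte_len, two_byte_len, 2},	/* 1: a simple double-byte encoding */
};

static int
encoded_mblen(int encoding, const unsigned char *s)
{
	return wchar_table[encoding].mblen(s);
}

int
main(void)
{
	const unsigned char text[] = {0x41, 0xA4, 0xA2, 0x00};

	printf("latin-like first char: %d byte(s)\n", encoded_mblen(0, text));
	printf("double-byte second char: %d byte(s)\n", encoded_mblen(1, text + 1));
	return 0;
}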
/* returns the byte length of a word for mule internal code */
return false;
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("Unicode characters greater than or equal to 0x10000 are not supported")));
+ errmsg("Unicode characters greater than or equal to 0x10000 are not supported")));
}
l = pg_mblen(mbstr);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
- errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
- GetDatabaseEncodingName(), buf)));
+ errmsg("invalid byte sequence for encoding \"%s\": 0x%s",
+ GetDatabaseEncodingName(), buf)));
}
}
 * Written by Peter Eisentraut.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.233 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/guc.c,v 1.234 2004/08/29 05:06:51 momjian Exp $
*
*--------------------------------------------------------------------
*/
#include "utils/pg_locale.h"
#include "pgstat.h"
-char *guc_pgdata;
-char *guc_hbafile;
-char *guc_identfile;
-char *external_pidfile;
+char *guc_pgdata;
+char *guc_hbafile;
+char *guc_identfile;
+char *external_pidfile;
-char *user_pgconfig = NULL;
-bool user_pgconfig_is_dir = false;
+char *user_pgconfig = NULL;
+bool user_pgconfig_is_dir = false;
#ifndef PG_KRB_SRVTAB
#define PG_KRB_SRVTAB ""
extern int DebugSharedBuffers;
static const char *assign_log_destination(const char *value,
- bool doit, GucSource source);
+ bool doit, GucSource source);
#ifdef HAVE_SYSLOG
extern char *Syslog_facility;
static const char *assign_defaultxactisolevel(const char *newval, bool doit,
GucSource source);
static const char *assign_log_min_messages(const char *newval, bool doit,
- GucSource source);
+ GucSource source);
static const char *assign_client_min_messages(const char *newval,
bool doit, GucSource source);
static const char *assign_min_error_statement(const char *newval, bool doit,
GucSource source);
static const char *assign_msglvl(int *var, const char *newval, bool doit,
- GucSource source);
+ GucSource source);
static const char *assign_log_error_verbosity(const char *newval, bool doit,
GucSource source);
static const char *assign_log_statement(const char *newval, bool doit,
- GucSource source);
+ GucSource source);
static const char *assign_log_stmtlvl(int *var, const char *newval,
- bool doit, GucSource source);
+ bool doit, GucSource source);
static bool assign_phony_autocommit(bool newval, bool doit, GucSource source);
static const char *assign_custom_variable_classes(const char *newval, bool doit,
- GucSource source);
+ GucSource source);
static bool assign_stage_log_stats(bool newval, bool doit, GucSource source);
static bool assign_log_stats(bool newval, bool doit, GucSource source);
static bool assign_transaction_read_only(bool newval, bool doit, GucSource source);
static int max_identifier_length;
static int block_size;
static bool integer_datetimes;
+
/* should be static, but commands/variable.c needs to get at it */
-char *session_authorization_string;
+char *session_authorization_string;
/*
*/
const char *const GucSource_Names[] =
{
- /* PGC_S_DEFAULT */ "default",
- /* PGC_S_ENV_VAR */ "environment variable",
- /* PGC_S_FILE */ "configuration file",
- /* PGC_S_ARGV */ "command line",
- /* PGC_S_UNPRIVILEGED */ "unprivileged",
- /* PGC_S_DATABASE */ "database",
- /* PGC_S_USER */ "user",
- /* PGC_S_CLIENT */ "client",
- /* PGC_S_OVERRIDE */ "override",
- /* PGC_S_INTERACTIVE */ "interactive",
- /* PGC_S_TEST */ "test",
- /* PGC_S_SESSION */ "session"
+ /* PGC_S_DEFAULT */ "default",
+ /* PGC_S_ENV_VAR */ "environment variable",
+ /* PGC_S_FILE */ "configuration file",
+ /* PGC_S_ARGV */ "command line",
+ /* PGC_S_UNPRIVILEGED */ "unprivileged",
+ /* PGC_S_DATABASE */ "database",
+ /* PGC_S_USER */ "user",
+ /* PGC_S_CLIENT */ "client",
+ /* PGC_S_OVERRIDE */ "override",
+ /* PGC_S_INTERACTIVE */ "interactive",
+ /* PGC_S_TEST */ "test",
+ /* PGC_S_SESSION */ "session"
};
/*
* TO ADD AN OPTION:
*
* 1. Declare a global variable of type bool, int, double, or char*
- * and make use of it.
+ * and make use of it.
*
* 2. Decide at what times it's safe to set the option. See guc.h for
- * details.
+ * details.
*
* 3. Decide on a name, a default value, upper and lower bounds (if
- * applicable), etc.
+ * applicable), etc.
*
* 4. Add a record below.
*
* 5. Add it to src/backend/utils/misc/postgresql.conf.sample, if
- * appropriate
+ * appropriate
*
* 6. Add it to src/bin/psql/tab-complete.c, if it's a USERSET option.
*
* 7. Don't forget to document the option.
*
* 8. If it's a new GUC_LIST option you must edit pg_dumpall.c to ensure
- * it is not single quoted at dump time.
+ * it is not single quoted at dump time.
*/
{"silent_mode", PGC_POSTMASTER, LOGGING_WHEN,
gettext_noop("Runs the server silently."),
gettext_noop("If this parameter is set, the server will automatically run in the "
- "background and any controlling terminals are dissociated.")
+ "background and any controlling terminals are dissociated.")
},
&SilentMode,
false, NULL, NULL
},
{
{"log_disconnections", PGC_BACKEND, LOGGING_WHAT,
- gettext_noop("Logs end of a session, including duration"),
- NULL
+ gettext_noop("Logs end of a session, including duration"),
+ NULL
},
&Log_disconnections,
false, NULL, NULL
},
{
{"default_with_oids", PGC_USERSET, COMPAT_OPTIONS_PREVIOUS,
- gettext_noop("By default, newly-created tables should have OIDs"),
- NULL
+ gettext_noop("By default, newly-created tables should have OIDs"),
+ NULL
},
&default_with_oids,
true, NULL, NULL
},
{
{"redirect_stderr", PGC_POSTMASTER, LOGGING_WHERE,
- gettext_noop("Start a subprocess to capture stderr output into log files"),
- NULL
+ gettext_noop("Start a subprocess to capture stderr output into log files"),
+ NULL
},
&Redirect_stderr,
false, NULL, NULL
{"default_statistics_target", PGC_USERSET, QUERY_TUNING_OTHER,
gettext_noop("Sets the default statistics target."),
gettext_noop("This applies to table columns that have not had a "
- "column-specific target set via ALTER TABLE SET STATISTICS.")
+ "column-specific target set via ALTER TABLE SET STATISTICS.")
},
&default_statistics_target,
10, 1, 1000, NULL, NULL
{"work_mem", PGC_USERSET, RESOURCES_MEM,
gettext_noop("Sets the maximum memory to be used for query workspaces."),
gettext_noop("This much memory may be used by each internal "
- "sort operation and hash table before switching to "
+ "sort operation and hash table before switching to "
"temporary disk files.")
},
&work_mem,
{
{"log_rotation_age", PGC_SIGHUP, LOGGING_WHERE,
- gettext_noop("Automatic logfile rotation will occur after N minutes"),
- NULL
+ gettext_noop("Automatic logfile rotation will occur after N minutes"),
+ NULL
},
&Log_RotationAge,
- 24*60, 0, INT_MAX, NULL, NULL
+ 24 * 60, 0, INT_MAX, NULL, NULL
},
{
{"log_rotation_size", PGC_SIGHUP, LOGGING_WHERE,
- gettext_noop("Automatic logfile rotation will occur after N kilobytes"),
- NULL
+ gettext_noop("Automatic logfile rotation will occur after N kilobytes"),
+ NULL
},
&Log_RotationSize,
- 10*1024, 0, INT_MAX, NULL, NULL
+ 10 * 1024, 0, INT_MAX, NULL, NULL
},
{
static struct config_string ConfigureNamesString[] =
{
- {
- {"archive_command", PGC_SIGHUP, WAL_SETTINGS,
- gettext_noop("WAL archiving command."),
- gettext_noop("The shell command that will be called to archive a WAL file.")
- },
- &XLogArchiveCommand,
- "", NULL, NULL
- },
+ {
+ {"archive_command", PGC_SIGHUP, WAL_SETTINGS,
+ gettext_noop("WAL archiving command."),
+ gettext_noop("The shell command that will be called to archive a WAL file.")
+ },
+ &XLogArchiveCommand,
+ "", NULL, NULL
+ },
{
{"client_encoding", PGC_USERSET, CLIENT_CONN_LOCALE,
{
{"log_line_prefix", PGC_SIGHUP, LOGGING_WHAT,
- gettext_noop("Controls information prefixed to each log line"),
- gettext_noop("if blank no prefix is used")
+ gettext_noop("Controls information prefixed to each log line"),
+ gettext_noop("if blank no prefix is used")
},
&Log_line_prefix,
"", NULL, NULL
{"default_transaction_isolation", PGC_USERSET, CLIENT_CONN_STATEMENT,
gettext_noop("Sets the transaction isolation level of each new transaction."),
gettext_noop("Each SQL transaction has an isolation level, which "
- "can be either \"read uncommitted\", \"read committed\", \"repeatable read\", or \"serializable\".")
+ "can be either \"read uncommitted\", \"read committed\", \"repeatable read\", or \"serializable\".")
},
&default_iso_level_string,
"read committed", assign_defaultxactisolevel, NULL
{
{"log_destination", PGC_SIGHUP, LOGGING_WHERE,
- gettext_noop("Sets the destination for server log output."),
- gettext_noop("Valid values are combinations of stderr, syslog "
- "and eventlog, depending on platform."),
- GUC_LIST_INPUT
+ gettext_noop("Sets the destination for server log output."),
+ gettext_noop("Valid values are combinations of stderr, syslog "
+ "and eventlog, depending on platform."),
+ GUC_LIST_INPUT
},
&log_destination_string,
"stderr", assign_log_destination, NULL
},
{
{"log_directory", PGC_SIGHUP, LOGGING_WHERE,
- gettext_noop("Sets the destination directory for logfiles."),
- gettext_noop("May be specified as relative to the cluster directory "
- "or as absolute path.")
+ gettext_noop("Sets the destination directory for logfiles."),
+ gettext_noop("May be specified as relative to the cluster directory "
+ "or as absolute path.")
},
&Log_directory,
"pg_log", NULL, NULL
},
{
{"log_filename_prefix", PGC_SIGHUP, LOGGING_WHERE,
- gettext_noop("Prefix for file names created in the log_directory."),
- NULL
+ gettext_noop("Prefix for file names created in the log_directory."),
+ NULL
},
&Log_filename_prefix,
"postgresql-", NULL, NULL
},
{
- {"pgdata", PGC_POSTMASTER, 0, gettext_noop("Sets the location of the data directory"), NULL},
+ {"pgdata", PGC_POSTMASTER, 0, gettext_noop("Sets the location of the data directory"), NULL},
&guc_pgdata,
NULL, assign_canonical_path, NULL
},
{
- {"hba_conf", PGC_SIGHUP, 0, gettext_noop("Sets the location of the \"hba\" configuration file"), NULL},
+ {"hba_conf", PGC_SIGHUP, 0, gettext_noop("Sets the location of the \"hba\" configuration file"), NULL},
&guc_hbafile,
NULL, assign_canonical_path, NULL
},
{
- {"ident_conf", PGC_SIGHUP, 0, gettext_noop("Sets the location of the \"ident\" configuration file"), NULL},
+ {"ident_conf", PGC_SIGHUP, 0, gettext_noop("Sets the location of the \"ident\" configuration file"), NULL},
&guc_identfile,
NULL, assign_canonical_path, NULL
},
/******** end of options list ********/
-
+
/*
* To allow continued support of obsolete names for GUC variables, we apply
* the following mappings to any unrecognized name. Note that an old name
* should be mapped to a new one only if the new variable has very similar
* semantics to the old.
*/
-static const char * const map_old_guc_names[] = {
+static const char *const map_old_guc_names[] = {
"sort_mem", "work_mem",
"vacuum_mem", "maintenance_work_mem",
NULL
static struct config_generic **guc_variables;
/* Current number of variables contained in the vector */
-static int num_guc_variables;
+static int num_guc_variables;
/* Vector capacity */
-static int size_guc_variables;
+static int size_guc_variables;
static bool guc_dirty; /* TRUE if need to do commit/abort work */
static int guc_var_compare(const void *a, const void *b);
static int guc_name_compare(const char *namea, const char *nameb);
-static void push_old_value(struct config_generic *gconf);
+static void push_old_value(struct config_generic * gconf);
static void ReportGUCOption(struct config_generic * record);
static char *_ShowOption(struct config_generic * record);
static void *
guc_malloc(int elevel, size_t size)
{
- void *data;
+ void *data;
data = malloc(size);
if (data == NULL)
static void *
guc_realloc(int elevel, void *old, size_t size)
{
- void *data;
+ void *data;
data = realloc(old, size);
if (data == NULL)
static char *
guc_strdup(int elevel, const char *src)
{
- char *data;
+ char *data;
data = strdup(src);
if (data == NULL)
* states).
*/
static void
-set_string_field(struct config_string *conf, char **field, char *newval)
+set_string_field(struct config_string * conf, char **field, char *newval)
{
- char *oldval = *field;
- GucStack *stack;
+ char *oldval = *field;
+ GucStack *stack;
/* Do the assignment */
*field = newval;
* Detect whether strval is referenced anywhere in a GUC string item
*/
static bool
-string_field_used(struct config_string *conf, char *strval)
+string_field_used(struct config_string * conf, char *strval)
{
- GucStack *stack;
+ GucStack *stack;
if (strval == *(conf->variable) ||
strval == conf->reset_val ||
void
build_guc_variables(void)
{
- int size_vars;
+ int size_vars;
int num_vars = 0;
struct config_generic **guc_vars;
int i;
num_vars++;
}
- /* Create table with 20% slack
+ /*
+ * Create table with 20% slack
*/
size_vars = num_vars + num_vars / 4;
guc_variables = guc_vars;
num_guc_variables = num_vars;
size_guc_variables = size_vars;
- qsort((void*) guc_variables, num_guc_variables,
- sizeof(struct config_generic*), guc_var_compare);
+ qsort((void *) guc_variables, num_guc_variables,
+ sizeof(struct config_generic *), guc_var_compare);
}
static bool
is_custom_class(const char *name, int dotPos)
{
- /* The assign_custom_variable_classes has made sure no empty
+ /*
+ * The assign_custom_variable_classes has made sure no empty
* identifiers or whitespace exists in the variable
*/
- bool result = false;
+ bool result = false;
const char *ccs = GetConfigOption("custom_variable_classes");
- if(ccs != NULL)
+
+ if (ccs != NULL)
{
const char *start = ccs;
- for(;; ++ccs)
+
+ for (;; ++ccs)
{
- int c = *ccs;
- if(c == 0 || c == ',')
+ int c = *ccs;
+
+ if (c == 0 || c == ',')
{
- if(dotPos == ccs - start && strncmp(start, name, dotPos) == 0)
+ if (dotPos == ccs - start && strncmp(start, name, dotPos) == 0)
{
result = true;
break;
}
- if(c == 0)
+ if (c == 0)
break;
start = ccs + 1;
}
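
is_custom_class above walks custom_variable_classes, comparing the text before the variable's dot against each comma-separated entry. A compact, hedged sketch of that membership test on an ordinary C string, assuming (as the original comment notes) that the list has already been stripped of whitespace and empty entries; is_custom_class_sketch is an invented name:

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

/*
 * Return true if the first prefix_len characters of name match one of the
 * comma-separated entries in classes.
 */
static bool
is_custom_class_sketch(const char *classes, const char *name, size_t prefix_len)
{
	const char *start = classes;
	const char *p = classes;

	for (;; p++)
	{
		int			c = *p;

		if (c == '\0' || c == ',')
		{
			if ((size_t) (p - start) == prefix_len &&
				strncmp(start, name, prefix_len) == 0)
				return true;
			if (c == '\0')
				return false;
			start = p + 1;
		}
	}
}

int
main(void)
{
	const char *classes = "plpgsql,plperl";
	const char *name = "plperl.use_strict";
	const char *dot = strchr(name, '.');

	printf("known class: %d\n",
		   is_custom_class_sketch(classes, name, (size_t) (dot - name)));
	return 0;
}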
* list is expanded if needed.
*/
static bool
-add_guc_variable(struct config_generic *var, int elevel)
+add_guc_variable(struct config_generic * var, int elevel)
{
- if(num_guc_variables + 1 >= size_guc_variables)
+ if (num_guc_variables + 1 >= size_guc_variables)
{
- /* Increase the vector by 25%
+ /*
+ * Increase the vector by 25%
*/
- int size_vars = size_guc_variables + size_guc_variables / 4;
- struct config_generic** guc_vars;
+ int size_vars = size_guc_variables + size_guc_variables / 4;
+ struct config_generic **guc_vars;
- if(size_vars == 0)
+ if (size_vars == 0)
{
size_vars = 100;
- guc_vars = (struct config_generic**)
- guc_malloc(elevel, size_vars * sizeof(struct config_generic*));
+ guc_vars = (struct config_generic **)
+ guc_malloc(elevel, size_vars * sizeof(struct config_generic *));
}
else
{
- guc_vars = (struct config_generic**)
- guc_realloc(elevel, guc_variables, size_vars * sizeof(struct config_generic*));
+ guc_vars = (struct config_generic **)
+ guc_realloc(elevel, guc_variables, size_vars * sizeof(struct config_generic *));
}
- if(guc_vars == NULL)
+ if (guc_vars == NULL)
return false; /* out of memory */
guc_variables = guc_vars;
size_guc_variables = size_vars;
}
guc_variables[num_guc_variables++] = var;
- qsort((void*) guc_variables, num_guc_variables,
- sizeof(struct config_generic*), guc_var_compare);
+ qsort((void *) guc_variables, num_guc_variables,
+ sizeof(struct config_generic *), guc_var_compare);
return true;
}
* Create and add a placeholder variable. It's presumed to belong
* to a valid custom variable class at this point.
*/
-static struct config_string*
+static struct config_string *
add_placeholder_variable(const char *name, int elevel)
{
- size_t sz = sizeof(struct config_string) + sizeof(char*);
- struct config_string* var;
- struct config_generic* gen;
+ size_t sz = sizeof(struct config_string) + sizeof(char *);
+ struct config_string *var;
+ struct config_generic *gen;
- var = (struct config_string*)guc_malloc(elevel, sz);
- if(var == NULL)
+ var = (struct config_string *) guc_malloc(elevel, sz);
+ if (var == NULL)
return NULL;
gen = &var->gen;
memset(var, 0, sz);
gen->name = guc_strdup(elevel, name);
- if(gen->name == NULL)
+ if (gen->name == NULL)
{
free(var);
return NULL;
}
- gen->context = PGC_USERSET;
- gen->group = CUSTOM_OPTIONS;
+ gen->context = PGC_USERSET;
+ gen->group = CUSTOM_OPTIONS;
gen->short_desc = "GUC placeholder variable";
- gen->flags = GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_CUSTOM_PLACEHOLDER;
- gen->vartype = PGC_STRING;
+ gen->flags = GUC_NO_SHOW_ALL | GUC_NOT_IN_SAMPLE | GUC_CUSTOM_PLACEHOLDER;
+ gen->vartype = PGC_STRING;
- /* The char* is allocated at the end of the struct since we have
- * no 'static' place to point to.
- */
- var->variable = (char**)(var + 1);
+ /*
+ * The char* is allocated at the end of the struct since we have no
+ * 'static' place to point to.
+ */
+ var->variable = (char **) (var + 1);
- if(!add_guc_variable((struct config_generic*) var, elevel))
+ if (!add_guc_variable((struct config_generic *) var, elevel))
{
free((void *) gen->name);
free(var);
return *res;
/*
- * See if the name is an obsolete name for a variable. We assume that
+ * See if the name is an obsolete name for a variable. We assume that
* the set of supported old names is short enough that a brute-force
* search is the best way.
*/
for (i = 0; map_old_guc_names[i] != NULL; i += 2)
{
if (guc_name_compare(name, map_old_guc_names[i]) == 0)
- return find_option(map_old_guc_names[i+1], elevel);
+ return find_option(map_old_guc_names[i + 1], elevel);
}
/*
* maps to a custom variable class.
*/
dot = strchr(name, GUC_QUALIFIER_SEPARATOR);
- if(dot != NULL && is_custom_class(name, dot - name))
+ if (dot != NULL && is_custom_class(name, dot - name))
/* Add a placeholder variable for this name */
- return (struct config_generic*)add_placeholder_variable(name, elevel);
+ return (struct config_generic *) add_placeholder_variable(name, elevel);
/* Unknown name */
return NULL;
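
Lookups above go through a sorted pointer array: a binary search on the canonical name, then a short brute-force pass over the obsolete-name map if that fails. A self-contained sketch of the same two-step lookup using bsearch() and POSIX strcasecmp in place of the backend's name comparator; the option set and struct names here are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>			/* strcasecmp */

typedef struct Option
{
	const char *name;
	int			value;
} Option;

/* Must be kept sorted by name (case-insensitively). */
static Option options[] = {
	{"maintenance_work_mem", 16384},
	{"shared_buffers", 1000},
	{"work_mem", 1024},
};

/* Obsolete name -> current name pairs, NULL-terminated. */
static const char *const old_names[] = {
	"sort_mem", "work_mem",
	"vacuum_mem", "maintenance_work_mem",
	NULL
};

static int
option_compare(const void *a, const void *b)
{
	const char *name = a;		/* bsearch passes the key first */
	const Option *opt = b;

	return strcasecmp(name, opt->name);
}

static Option *
find_option_sketch(const char *name)
{
	Option	   *res;
	int			i;

	res = bsearch(name, options,
				  sizeof(options) / sizeof(options[0]), sizeof(Option),
				  option_compare);
	if (res)
		return res;

	/* Not found: see if it is an obsolete spelling of a known option. */
	for (i = 0; old_names[i] != NULL; i += 2)
	{
		if (strcasecmp(name, old_names[i]) == 0)
			return find_option_sketch(old_names[i + 1]);
	}
	return NULL;				/* unknown name */
}

int
main(void)
{
	Option	   *opt = find_option_sketch("sort_mem");

	if (opt)
		printf("resolved to %s = %d\n", opt->name, opt->value);
	return 0;
}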
struct config_string *conf = (struct config_string *) gconf;
char *str;
- /* Check to make sure we only have valid PGC_USERLIMITs */
+ /*
+ * Check to make sure we only have valid
+ * PGC_USERLIMITs
+ */
Assert(conf->gen.context != PGC_USERLIMIT ||
conf->assign_hook == assign_log_min_messages ||
- conf->assign_hook == assign_min_error_statement ||
+ conf->assign_hook == assign_min_error_statement ||
conf->assign_hook == assign_log_statement);
*conf->variable = NULL;
conf->reset_val = NULL;
* the proper value available to restore the setting to.
*/
static void
-push_old_value(struct config_generic *gconf)
+push_old_value(struct config_generic * gconf)
{
int my_level = GetCurrentTransactionNestLevel();
GucStack *stack;
/*
* We keep all the stack entries in TopTransactionContext so as to
- * avoid allocation problems when a subtransaction back-fills stack
- * entries for upper transaction levels.
+ * avoid allocation problems when a subtransaction back-fills
+ * stack entries for upper transaction levels.
*/
stack = (GucStack *) MemoryContextAlloc(TopTransactionContext,
sizeof(GucStack));
Assert(stack->nest_level == my_level);
/*
- * We will pop the stack entry. Start by restoring outer xact status
- * (since we may want to modify it below). Be careful to use
- * my_status to reference the inner xact status below this point...
+ * We will pop the stack entry. Start by restoring outer xact
+ * status (since we may want to modify it below). Be careful to
+ * use my_status to reference the inner xact status below this
+ * point...
*/
gconf->status = stack->status;
/*
* We have two cases:
*
- * If commit and HAVE_TENTATIVE, set actual value to tentative
- * (this is to override a SET LOCAL if one occurred later than SET).
- * We keep the tentative value and propagate HAVE_TENTATIVE to
- * the parent status, allowing the SET's effect to percolate up.
- * (But if we're exiting the outermost transaction, we'll drop the
+ * If commit and HAVE_TENTATIVE, set actual value to tentative (this
+ * is to override a SET LOCAL if one occurred later than SET). We
+ * keep the tentative value and propagate HAVE_TENTATIVE to the
+ * parent status, allowing the SET's effect to percolate up. (But
+ * if we're exiting the outermost transaction, we'll drop the
* HAVE_TENTATIVE bit below.)
*
- * Otherwise, we have a transaction that aborted or executed only
- * SET LOCAL (or no SET at all). In either case it should have no
- * further effect, so restore both tentative and actual values from
- * the stack entry.
+ * Otherwise, we have a transaction that aborted or executed only SET
+ * LOCAL (or no SET at all). In either case it should have no
+ * further effect, so restore both tentative and actual values
+ * from the stack entry.
*/
useTentative = isCommit && (my_status & GUC_HAVE_TENTATIVE) != 0;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
{
if (conf->assign_hook)
if (!(*conf->assign_hook) (newval,
- true, PGC_S_OVERRIDE))
+ true, PGC_S_OVERRIDE))
elog(LOG, "failed to commit %s",
conf->gen.name);
*conf->variable = newval;
const char *newstr;
newstr = (*conf->assign_hook) (newval, true,
- PGC_S_OVERRIDE);
+ PGC_S_OVERRIDE);
if (newstr == NULL)
elog(LOG, "failed to commit %s",
conf->gen.name);
pfree(stack);
/*
- * If we're now out of all xact levels, forget TENTATIVE status bit;
- * there's nothing tentative about the value anymore.
+ * If we're now out of all xact levels, forget TENTATIVE status
+ * bit; there's nothing tentative about the value anymore.
*/
if (!isSubXact)
{
}
/*
- * If we're now out of all xact levels, we can clear guc_dirty.
- * (Note: we cannot reset guc_dirty when exiting a subtransaction,
- * because we know that all outer transaction levels will have stacked
- * values to deal with.)
+ * If we're now out of all xact levels, we can clear guc_dirty. (Note:
+ * we cannot reset guc_dirty when exiting a subtransaction, because we
+ * know that all outer transaction levels will have stacked values to
+ * deal with.)
*/
if (!isSubXact)
guc_dirty = false;
{
struct config_generic *record;
int elevel;
- bool makeDefault, changeValOrig = changeVal;
+ bool makeDefault,
+ changeValOrig = changeVal;
if (context == PGC_SIGHUP || source == PGC_S_DEFAULT)
elevel = DEBUG2;
{
ereport(elevel,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
return false;
}
{
ereport(elevel,
(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM),
- errmsg("parameter \"%s\" cannot be changed after server start",
- name)));
+ errmsg("parameter \"%s\" cannot be changed after server start",
+ name)));
return false;
}
break;
{
ereport(elevel,
(errcode(ERRCODE_CANT_CHANGE_RUNTIME_PARAM),
- errmsg("parameter \"%s\" cannot be set after connection start",
- name)));
+ errmsg("parameter \"%s\" cannot be set after connection start",
+ name)));
return false;
}
break;
{
ereport(elevel,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set parameter \"%s\"",
- name)));
+ errmsg("permission denied to set parameter \"%s\"",
+ name)));
return false;
}
break;
name);
return true;
}
- changeVal = false; /* this might be reset in USERLIMIT */
+ changeVal = false; /* this might be reset in USERLIMIT */
}
/*
- * Evaluate value and set variable.
- * USERLIMIT checks two things: 1) is the user making a change
- * that is blocked by an administrator setting. 2) is the administrator
- * changing a setting and doing a SIGHUP that requires us to override
- * a user setting.
+ * Evaluate value and set variable. USERLIMIT checks two things: 1)
+ * is the user making a change that is blocked by an administrator
+ * setting. 2) is the administrator changing a setting and doing a
+ * SIGHUP that requires us to override a user setting.
*/
switch (record->vartype)
{
if (source > PGC_S_UNPRIVILEGED && !superuser())
{
ereport(elevel,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to set parameter \"%s\"",
name),
- errhint("must be superuser to change this value to false")));
+ errhint("must be superuser to change this value to false")));
return false;
}
}
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, (int) newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, (int) newval)));
return false;
}
}
if (makeDefault)
{
- GucStack *stack;
+ GucStack *stack;
if (conf->gen.reset_source <= source)
{
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("parameter \"%s\" requires an integer value",
- name)));
+ errmsg("parameter \"%s\" requires an integer value",
+ name)));
return false;
}
if (newval < conf->min || newval > conf->max)
{
/* handle log_min_duration_statement, -1=disable */
if ((newval != -1 && conf->reset_val != -1 &&
- newval > conf->reset_val) || /* increase duration */
- (newval == -1 && conf->reset_val != -1)) /* turn off */
+ newval > conf->reset_val) || /* increase duration */
+ (newval == -1 && conf->reset_val != -1)) /* turn off */
{
/* Limit non-superuser changes */
if (source > PGC_S_UNPRIVILEGED && !superuser())
{
ereport(elevel,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to set parameter \"%s\"",
name),
- errhint("Must be superuser to increase this value or turn it off.")));
+ errhint("Must be superuser to increase this value or turn it off.")));
return false;
}
}
/* Allow change if admin should override */
if ((newval != -1 && *conf->variable != -1 &&
- newval < *conf->variable) || /* decrease duration */
- (newval != -1 && *conf->variable == -1)) /* turn on */
+ newval < *conf->variable) || /* decrease duration */
+ (newval != -1 && *conf->variable == -1)) /* turn on */
{
if (source < PGC_S_UNPRIVILEGED &&
record->source > PGC_S_UNPRIVILEGED &&
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %d",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %d",
+ name, newval)));
return false;
}
}
if (makeDefault)
{
- GucStack *stack;
+ GucStack *stack;
if (conf->gen.reset_source <= source)
{
return false;
}
if (record->context == PGC_USERLIMIT)
- /* No REAL PGC_USERLIMIT */
+ /* No REAL PGC_USERLIMIT */
{
/* Limit non-superuser changes */
if (source > PGC_S_UNPRIVILEGED && !superuser())
{
ereport(elevel,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("permission denied to set parameter \"%s\"",
- name),
- errhint("Must be superuser to increase this value or turn it off.")));
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ errmsg("permission denied to set parameter \"%s\"",
+ name),
+ errhint("Must be superuser to increase this value or turn it off.")));
return false;
}
/* Allow change if admin should override */
{
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": %g",
- name, newval)));
+ errmsg("invalid value for parameter \"%s\": %g",
+ name, newval)));
return false;
}
}
if (makeDefault)
{
- GucStack *stack;
+ GucStack *stack;
if (conf->gen.reset_source <= source)
{
if (record->context == PGC_USERLIMIT)
{
- int var_value, reset_value, new_value;
- const char * (*var_hook) (int *var, const char *newval,
- bool doit, GucSource source);
-
+ int var_value,
+ reset_value,
+ new_value;
+ const char *(*var_hook) (int *var, const char *newval,
+ bool doit, GucSource source);
+
if (conf->assign_hook == assign_log_statement)
var_hook = assign_log_stmtlvl;
else
var_hook = assign_msglvl;
(*var_hook) (&new_value, newval, true, source);
- (*var_hook) (&reset_value, conf->reset_val, true,
+ (*var_hook) (&reset_value, conf->reset_val, true,
source);
(*var_hook) (&var_value, *conf->variable, true,
source);
if (source > PGC_S_UNPRIVILEGED && !superuser())
{
ereport(elevel,
- (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+ (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied to set parameter \"%s\"",
name),
- errhint("Must be superuser to increase this value.")));
+ errhint("Must be superuser to increase this value.")));
return false;
}
}
free(newval);
ereport(elevel,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for parameter \"%s\": \"%s\"",
- name, value ? value : "")));
+ errmsg("invalid value for parameter \"%s\": \"%s\"",
+ name, value ? value : "")));
return false;
}
else if (hookresult != newval)
}
if (makeDefault)
{
- GucStack *stack;
+ GucStack *stack;
if (conf->gen.reset_source <= source)
{
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
switch (record->vartype)
{
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
switch (record->vartype)
{
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
flags = record->flags;
}
static void
-define_custom_variable(struct config_generic* variable)
+define_custom_variable(struct config_generic * variable)
{
- const char* name = variable->name;
- const char** nameAddr = &name;
- const char* value;
- struct config_string* pHolder;
- struct config_generic** res = (struct config_generic**)bsearch(
- (void*)&nameAddr,
- (void*)guc_variables,
- num_guc_variables,
- sizeof(struct config_generic*),
- guc_var_compare);
-
- if(res == NULL)
+ const char *name = variable->name;
+ const char **nameAddr = &name;
+ const char *value;
+ struct config_string *pHolder;
+ struct config_generic **res = (struct config_generic **) bsearch(
+ (void *) &nameAddr,
+ (void *) guc_variables,
+ num_guc_variables,
+ sizeof(struct config_generic *),
+ guc_var_compare);
+
+ if (res == NULL)
{
add_guc_variable(variable, ERROR);
return;
}
- /* This better be a placeholder
+ /*
+ * This better be a placeholder
*/
- if(((*res)->flags & GUC_CUSTOM_PLACEHOLDER) == 0)
+ if (((*res)->flags & GUC_CUSTOM_PLACEHOLDER) == 0)
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("attempt to redefine parameter \"%s\"", name)));
Assert((*res)->vartype == PGC_STRING);
- pHolder = (struct config_string*) *res;
-
+ pHolder = (struct config_string *) * res;
+
/* We have the same name, no sorting is necessary */
*res = variable;
value = *pHolder->variable;
/*
- * Assign the string value stored in the placeholder to the real variable.
+ * Assign the string value stored in the placeholder to the real
+ * variable.
*
* XXX this is not really good enough --- it should be a nontransactional
* assignment, since we don't want it to roll back if the current xact
* fails later.
*/
set_config_option(name, value,
- pHolder->gen.context, pHolder->gen.source,
- false, true);
+ pHolder->gen.context, pHolder->gen.source,
+ false, true);
/*
* Free up as much as we conveniently can of the placeholder structure
free(pHolder);
}
-static void init_custom_variable(
- struct config_generic* gen,
- const char* name,
- const char* short_desc,
- const char* long_desc,
- GucContext context,
- enum config_type type)
+static void
+init_custom_variable(
+ struct config_generic * gen,
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ GucContext context,
+ enum config_type type)
{
- gen->name = guc_strdup(ERROR, name);
- gen->context = context;
- gen->group = CUSTOM_OPTIONS;
+ gen->name = guc_strdup(ERROR, name);
+ gen->context = context;
+ gen->group = CUSTOM_OPTIONS;
gen->short_desc = short_desc;
- gen->long_desc = long_desc;
- gen->vartype = type;
+ gen->long_desc = long_desc;
+ gen->vartype = type;
}
-void DefineCustomBoolVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- bool* valueAddr,
- GucContext context,
- GucBoolAssignHook assign_hook,
- GucShowHook show_hook)
+void
+DefineCustomBoolVariable(
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ bool *valueAddr,
+ GucContext context,
+ GucBoolAssignHook assign_hook,
+ GucShowHook show_hook)
{
- size_t sz = sizeof(struct config_bool);
- struct config_bool* var = (struct config_bool*)guc_malloc(ERROR, sz);
+ size_t sz = sizeof(struct config_bool);
+ struct config_bool *var = (struct config_bool *) guc_malloc(ERROR, sz);
memset(var, 0, sz);
init_custom_variable(&var->gen, name, short_desc, long_desc, context, PGC_BOOL);
- var->variable = valueAddr;
- var->reset_val = *valueAddr;
+ var->variable = valueAddr;
+ var->reset_val = *valueAddr;
var->assign_hook = assign_hook;
- var->show_hook = show_hook;
+ var->show_hook = show_hook;
define_custom_variable(&var->gen);
}
-void DefineCustomIntVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- int* valueAddr,
- GucContext context,
- GucIntAssignHook assign_hook,
- GucShowHook show_hook)
+void
+DefineCustomIntVariable(
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ int *valueAddr,
+ GucContext context,
+ GucIntAssignHook assign_hook,
+ GucShowHook show_hook)
{
- size_t sz = sizeof(struct config_int);
- struct config_int* var = (struct config_int*)guc_malloc(ERROR, sz);
+ size_t sz = sizeof(struct config_int);
+ struct config_int *var = (struct config_int *) guc_malloc(ERROR, sz);
memset(var, 0, sz);
init_custom_variable(&var->gen, name, short_desc, long_desc, context, PGC_INT);
- var->variable = valueAddr;
- var->reset_val = *valueAddr;
+ var->variable = valueAddr;
+ var->reset_val = *valueAddr;
var->assign_hook = assign_hook;
- var->show_hook = show_hook;
+ var->show_hook = show_hook;
define_custom_variable(&var->gen);
}
-void DefineCustomRealVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- double* valueAddr,
- GucContext context,
- GucRealAssignHook assign_hook,
- GucShowHook show_hook)
+void
+DefineCustomRealVariable(
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ double *valueAddr,
+ GucContext context,
+ GucRealAssignHook assign_hook,
+ GucShowHook show_hook)
{
- size_t sz = sizeof(struct config_real);
- struct config_real* var = (struct config_real*)guc_malloc(ERROR, sz);
+ size_t sz = sizeof(struct config_real);
+ struct config_real *var = (struct config_real *) guc_malloc(ERROR, sz);
memset(var, 0, sz);
init_custom_variable(&var->gen, name, short_desc, long_desc, context, PGC_REAL);
- var->variable = valueAddr;
- var->reset_val = *valueAddr;
+ var->variable = valueAddr;
+ var->reset_val = *valueAddr;
var->assign_hook = assign_hook;
- var->show_hook = show_hook;
+ var->show_hook = show_hook;
define_custom_variable(&var->gen);
}
-void DefineCustomStringVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- char** valueAddr,
- GucContext context,
- GucStringAssignHook assign_hook,
- GucShowHook show_hook)
+void
+DefineCustomStringVariable(
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ char **valueAddr,
+ GucContext context,
+ GucStringAssignHook assign_hook,
+ GucShowHook show_hook)
{
- size_t sz = sizeof(struct config_string);
- struct config_string* var = (struct config_string*)guc_malloc(ERROR, sz);
+ size_t sz = sizeof(struct config_string);
+ struct config_string *var = (struct config_string *) guc_malloc(ERROR, sz);
memset(var, 0, sz);
init_custom_variable(&var->gen, name, short_desc, long_desc, context, PGC_STRING);
- var->variable = valueAddr;
- var->reset_val = *valueAddr;
+ var->variable = valueAddr;
+ var->reset_val = *valueAddr;
var->assign_hook = assign_hook;
- var->show_hook = show_hook;
+ var->show_hook = show_hook;
define_custom_variable(&var->gen);
}
-extern void EmitWarningsOnPlaceholders(const char* className)
+extern void
+EmitWarningsOnPlaceholders(const char *className)
{
- struct config_generic** vars = guc_variables;
- struct config_generic** last = vars + num_guc_variables;
+ struct config_generic **vars = guc_variables;
+ struct config_generic **last = vars + num_guc_variables;
+
+ int nameLen = strlen(className);
- int nameLen = strlen(className);
- while(vars < last)
+ while (vars < last)
{
- struct config_generic* var = *vars++;
- if((var->flags & GUC_CUSTOM_PLACEHOLDER) != 0 &&
- strncmp(className, var->name, nameLen) == 0 &&
- var->name[nameLen] == GUC_QUALIFIER_SEPARATOR)
+ struct config_generic *var = *vars++;
+
+ if ((var->flags & GUC_CUSTOM_PLACEHOLDER) != 0 &&
+ strncmp(className, var->name, nameLen) == 0 &&
+ var->name[nameLen] == GUC_QUALIFIER_SEPARATOR)
{
ereport(INFO,
- (errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", var->name)));
+ (errcode(ERRCODE_UNDEFINED_OBJECT),
+ errmsg("unrecognized configuration parameter \"%s\"", var->name)));
}
}
}
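
The DefineCustom*Variable family and EmitWarningsOnPlaceholders above are the whole public surface of the custom-GUC mechanism being reindented here. A minimal sketch of how a loadable module would be expected to use it follows; the module name "mymodule", the variable, and the function below are hypothetical, and PGC_USERSET plus the "utils/guc.h" header are assumed from the surrounding tree rather than shown in this patch.

#include "postgres.h"
#include "utils/guc.h"

static bool my_feature_enabled = false;		/* hypothetical module setting */

void
my_module_define_gucs(void)
{
	/*
	 * Registers "mymodule.enable_feature".  If postgresql.conf listed
	 * custom_variable_classes = 'mymodule' and set the option before the
	 * module was loaded, define_custom_variable() above finds the string
	 * placeholder and re-assigns its value to this real bool variable.
	 */
	DefineCustomBoolVariable("mymodule.enable_feature",
							 "Enables the hypothetical feature.",
							 "Longer description of the hypothetical feature.",
							 &my_feature_enabled,
							 PGC_USERSET,
							 NULL,		/* no assign hook */
							 NULL);		/* no show hook */

	/* Complain about any mymodule.* placeholders that were never defined. */
	EmitWarningsOnPlaceholders("mymodule");
}
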
if (record == NULL)
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
- errmsg("unrecognized configuration parameter \"%s\"", name)));
+ errmsg("unrecognized configuration parameter \"%s\"", name)));
if (varname)
*varname = record->name;
* Open file
*/
new_filename = guc_malloc(elevel, strlen(DataDir) + strlen(CONFIG_EXEC_PARAMS) +
- strlen(".new") + 2);
- if(new_filename == NULL)
+ strlen(".new") + 2);
+ if (new_filename == NULL)
return;
filename = guc_malloc(elevel, strlen(DataDir) + strlen(CONFIG_EXEC_PARAMS) + 2);
}
/*
- * Put new file in place. This could delay on Win32, but we don't hold
- * any exclusive locks.
+ * Put new file in place. This could delay on Win32, but we don't
+ * hold any exclusive locks.
*/
rename(new_filename, filename);
free(new_filename);
break;
if ((record = find_option(varname, FATAL)) == NULL)
- elog(FATAL, "failed to locate variable %s in exec config params file",varname);
+ elog(FATAL, "failed to locate variable %s in exec config params file", varname);
if ((varvalue = read_string_with_null(fp)) == NULL)
elog(FATAL, "invalid format of exec config params file");
if (fread(&varsource, sizeof(varsource), 1, fp) == 0)
static const char *
assign_log_destination(const char *value, bool doit, GucSource source)
{
- char *rawstring;
- List *elemlist;
- ListCell *l;
- int newlogdest = 0;
-
+ char *rawstring;
+ List *elemlist;
+ ListCell *l;
+ int newlogdest = 0;
+
/* Need a modifiable copy of string */
rawstring = pstrdup(value);
/* Parse string into list of identifiers */
- if (!SplitIdentifierString(rawstring, ',', &elemlist))
+ if (!SplitIdentifierString(rawstring, ',', &elemlist))
{
/* syntax error in list */
pfree(rawstring);
foreach(l, elemlist)
{
- char *tok = (char *) lfirst(l);
-
- if (pg_strcasecmp(tok,"stderr") == 0)
+ char *tok = (char *) lfirst(l);
+
+ if (pg_strcasecmp(tok, "stderr") == 0)
newlogdest |= LOG_DESTINATION_STDERR;
#ifdef HAVE_SYSLOG
- else if (pg_strcasecmp(tok,"syslog") == 0)
+ else if (pg_strcasecmp(tok, "syslog") == 0)
newlogdest |= LOG_DESTINATION_SYSLOG;
#endif
#ifdef WIN32
- else if (pg_strcasecmp(tok,"eventlog") == 0)
+ else if (pg_strcasecmp(tok, "eventlog") == 0)
newlogdest |= LOG_DESTINATION_EVENTLOG;
#endif
- else {
+ else
+ {
if (source >= PGC_S_INTERACTIVE)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
if (doit)
(*var) = LOG;
}
+
/*
- * Client_min_messages always prints 'info', but
- * we allow it as a value anyway.
+ * Client_min_messages always prints 'info', but we allow it as a
+ * value anyway.
*/
else if (pg_strcasecmp(newval, "info") == 0)
{
static const char *
assign_log_statement(const char *newval, bool doit, GucSource source)
{
- return (assign_log_stmtlvl((int *)&log_statement, newval, doit, source));
+ return (assign_log_stmtlvl((int *) &log_statement, newval, doit, source));
}
static const char *
static const char *
assign_custom_variable_classes(const char *newval, bool doit, GucSource source)
{
- /* Check syntax. newval must be a comma separated list of identifiers.
+ /*
+ * Check syntax. newval must be a comma separated list of identifiers.
* Whitespace is allowed but skipped.
*/
- bool hasSpaceAfterToken = false;
+ bool hasSpaceAfterToken = false;
const char *cp = newval;
- int symLen = 0;
- int c;
+ int symLen = 0;
+ int c;
StringInfoData buf;
initStringInfo(&buf);
- while((c = *cp++) != 0)
+ while ((c = *cp++) != 0)
{
- if(isspace(c))
+ if (isspace(c))
{
- if(symLen > 0)
+ if (symLen > 0)
hasSpaceAfterToken = true;
continue;
}
- if(c == ',')
+ if (c == ',')
{
hasSpaceAfterToken = false;
- if(symLen > 0)
+ if (symLen > 0)
{
symLen = 0;
appendStringInfoChar(&buf, ',');
continue;
}
- if(hasSpaceAfterToken || !isalnum(c))
+ if (hasSpaceAfterToken || !isalnum(c))
{
- /* Syntax error due to token following space after
- * token or non alpha numeric character
+ /*
+ * Syntax error due to token following space after token or
+ * non alpha numeric character
*/
pfree(buf.data);
ereport(WARNING,
return NULL;
}
symLen++;
- appendStringInfoChar(&buf, (char)c);
+ appendStringInfoChar(&buf, (char) c);
}
- if(symLen == 0 && buf.len > 0)
+ if (symLen == 0 && buf.len > 0)
+
/*
* Remove stray ',' at end
*/
buf.data[--buf.len] = 0;
- if(buf.len == 0)
+ if (buf.len == 0)
newval = NULL;
- else if(doit)
+ else if (doit)
newval = strdup(buf.data);
pfree(buf.data);
if (doit)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot enable parameter when \"log_statement_stats\" is true.")));
+ errmsg("cannot enable parameter when \"log_statement_stats\" is true.")));
else
return false;
}
if (doit)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("cannot enable \"log_statement_stats\" when \"log_parser_stats\",\n"
- "\"log_planner_stats\", or \"log_executor_stats\" is true.")));
+ errmsg("cannot enable \"log_statement_stats\" when \"log_parser_stats\",\n"
+ "\"log_planner_stats\", or \"log_executor_stats\" is true.")));
else
return false;
}
static const char *
assign_canonical_path(const char *newval, bool doit, GucSource source)
{
-
+
if (doit)
{
/* We have to create a new pointer to force the change */
- char *canon_val = guc_strdup(ERROR, newval);
+ char *canon_val = guc_strdup(ERROR, newval);
+
canonicalize_path(canon_val);
return canon_val;
}
* to contain some useful information. Mechanism differs wildly across
* platforms.
*
- * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.20 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/misc/ps_status.c,v 1.21 2004/08/29 05:06:51 momjian Exp $
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
* various details abducted from various places
* from being clobbered by subsequent ps_display actions.
*
* (The original argv[] will not be overwritten by this routine, but may be
- * overwritten during init_ps_display. Also, the physical location of the
+ * overwritten during init_ps_display. Also, the physical location of the
* environment strings may be moved, so this should be called before any code
* that might try to hang onto a getenv() result.)
*/
-char **
+char **
save_ps_display_args(int argc, char **argv)
{
save_argc = argc;
new_argv = (char **) malloc((argc + 1) * sizeof(char *));
for (i = 0; i < argc; i++)
- new_argv[i] = strdup(argv[i]);
+ new_argv[i] = strdup(argv[i]);
new_argv[argc] = NULL;
#if defined(__darwin__)
+
/*
* Darwin (and perhaps other NeXT-derived platforms?) has a static
* copy of the argv pointer, which we may fix like so:
argv = new_argv;
}
-#endif /* PS_USE_CHANGE_ARGV or PS_USE_CLOBBER_ARGV */
+#endif /* PS_USE_CHANGE_ARGV or
+ * PS_USE_CLOBBER_ARGV */
return argv;
}
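
Since the warning in the comment above is easy to overlook, here is a minimal, hypothetical caller sketch: argv is swapped for the saved copy before any getenv() result is cached. The header name "utils/ps_status.h" is assumed from the surrounding tree rather than shown in this hunk.

#include "postgres.h"
#include "utils/ps_status.h"

#include <stdlib.h>

int
main(int argc, char *argv[])
{
	char	   *home;

	/* Must run before anything hangs onto a getenv() pointer. */
	argv = save_ps_display_args(argc, argv);

	/* Safe to keep this pointer now; the environment, if relocated, already was. */
	home = getenv("HOME");
	(void) home;

	return 0;
}
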
#ifdef PS_USE_CLOBBER_ARGV
{
- int i;
+ int i;
/* make extra argv slots point at end_of_area (a NUL) */
for (i = 1; i < save_argc; i++)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.56 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/aset.c,v 1.57 2004/08/29 05:06:51 momjian Exp $
*
* NOTE:
* This is a new (Feb. 05, 1999) implementation of the allocation set
{
/*
* Use first power of 2 that is larger than previous block,
- * but not more than the allowed limit. (We don't simply double
- * the prior block size, because in some cases this could be a
- * funny size, eg if very first allocation was for an odd-sized
- * large chunk.)
+ * but not more than the allowed limit. (We don't simply
+ * double the prior block size, because in some cases this
+ * could be a funny size, eg if very first allocation was for
+ * an odd-sized large chunk.)
*/
- Size pblksize = set->blocks->endptr - ((char *) set->blocks);
+ Size pblksize = set->blocks->endptr - ((char *) set->blocks);
blksize = set->initBlockSize;
while (blksize <= pblksize)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.49 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/mcxt.c,v 1.50 2004/08/29 05:06:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
pfree(pointer);
return;
}
+
#endif
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.70 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/mmgr/portalmem.c,v 1.71 2004/08/29 05:06:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
(*portal->cleanup) (portal);
/*
- * Release any resources still attached to the portal. There are
+ * Release any resources still attached to the portal. There are
* several cases being covered here:
*
* Top transaction commit (indicated by isTopCommit): normally we should
* do nothing here and let the regular end-of-transaction resource
- * releasing mechanism handle these resources too. However, if we have
- * a FAILED portal (eg, a cursor that got an error), we'd better clean
- * up its resources to avoid resource-leakage warning messages.
+ * releasing mechanism handle these resources too. However, if we
+ * have a FAILED portal (eg, a cursor that got an error), we'd better
+ * clean up its resources to avoid resource-leakage warning messages.
*
- * Sub transaction commit: never comes here at all, since we don't
- * kill any portals in AtSubCommit_Portals().
+ * Sub transaction commit: never comes here at all, since we don't kill
+ * any portals in AtSubCommit_Portals().
*
* Main or sub transaction abort: we will do nothing here because
* portal->resowner was already set NULL; the resources were already
* cleaned up in transaction abort.
*
* Ordinary portal drop: must release resources. However, if the portal
- * is not FAILED then we do not release its locks. The locks become
+ * is not FAILED then we do not release its locks. The locks become
* the responsibility of the transaction's ResourceOwner (since it is
* the parent of the portal's owner) and will be released when the
* transaction eventually ends.
if (portal->resowner &&
(!isTopCommit || portal->status == PORTAL_FAILED))
{
- bool isCommit = (portal->status != PORTAL_FAILED);
+ bool isCommit = (portal->status != PORTAL_FAILED);
ResourceOwnerRelease(portal->resowner,
RESOURCE_RELEASE_BEFORE_LOCKS,
* Do not touch active portals --- this can only happen in the
* case of a multi-transaction utility command, such as VACUUM.
*
- * Note however that any resource owner attached to such a portal
- * is still going to go away, so don't leave a dangling pointer.
+ * Note however that any resource owner attached to such a portal is
+ * still going to go away, so don't leave a dangling pointer.
*/
if (portal->status == PORTAL_ACTIVE)
{
PersistHoldablePortal(portal);
/*
- * Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; the portal will no
+ * Any resources belonging to the portal will be released in
+ * the upcoming transaction-wide cleanup; the portal will no
* longer have its own resources.
*/
portal->resowner = NULL;
(*portal->cleanup) (portal);
portal->cleanup = NULL;
}
+
/*
* Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; they will be gone before
- * we run PortalDrop.
+ * upcoming transaction-wide cleanup; they will be gone before we
+ * run PortalDrop.
*/
portal->resowner = NULL;
}
while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
{
- Portal portal = hentry->portal;
+ Portal portal = hentry->portal;
if (portal->createXact == curXid)
{
while ((hentry = (PortalHashEnt *) hash_seq_search(&status)) != NULL)
{
- Portal portal = hentry->portal;
+ Portal portal = hentry->portal;
if (portal->createXact != curXid)
continue;
/*
- * Force any active portals of my own transaction into FAILED state.
- * This is mostly to ensure that a portal running a FETCH will go
- * FAILED if the underlying cursor fails. (Note we do NOT want to
- * do this to upper-level portals, since they may be able to continue.)
+ * Force any active portals of my own transaction into FAILED
+ * state. This is mostly to ensure that a portal running a FETCH
+ * will go FAILED if the underlying cursor fails. (Note we do NOT
+ * want to do this to upper-level portals, since they may be able
+ * to continue.)
*/
- if (portal->status == PORTAL_ACTIVE)
+ if (portal->status == PORTAL_ACTIVE)
portal->status = PORTAL_FAILED;
/*
- * If the portal is READY then allow it to survive into the
- * parent transaction; otherwise shut it down.
+ * If the portal is READY then allow it to survive into the parent
+ * transaction; otherwise shut it down.
*/
if (portal->status == PORTAL_READY)
{
(*portal->cleanup) (portal);
portal->cleanup = NULL;
}
+
/*
- * Any resources belonging to the portal will be released in the
- * upcoming transaction-wide cleanup; they will be gone before
- * we run PortalDrop.
+ * Any resources belonging to the portal will be released in
+ * the upcoming transaction-wide cleanup; they will be gone
+ * before we run PortalDrop.
*/
portal->resowner = NULL;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.5 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/resowner/resowner.c,v 1.6 2004/08/29 05:06:51 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ResourceOwner parent; /* NULL if no parent (toplevel owner) */
ResourceOwner firstchild; /* head of linked list of children */
ResourceOwner nextchild; /* next child of same parent */
- const char *name; /* name (just for debugging) */
+ const char *name; /* name (just for debugging) */
/* We have built-in support for remembering owned buffers */
int nbuffers; /* number of owned buffer pins */
int ncatlistrefs; /* number of owned catcache-list pins */
CatCList **catlistrefs; /* dynamically allocated array */
- int maxcatlistrefs; /* currently allocated array size */
+ int maxcatlistrefs; /* currently allocated array size */
/* We have built-in support for remembering relcache references */
int nrelrefs; /* number of owned relcache pins */
/* Internal routines */
static void ResourceOwnerReleaseInternal(ResourceOwner owner,
- ResourceReleasePhase phase,
- bool isCommit,
- bool isTopLevel);
+ ResourceReleasePhase phase,
+ bool isCommit,
+ bool isTopLevel);
/*****************************************************************************
ResourceOwner owner;
owner = (ResourceOwner) MemoryContextAllocZero(TopMemoryContext,
- sizeof(ResourceOwnerData));
+ sizeof(ResourceOwnerData));
owner->name = name;
if (parent)
* but don't delete the owner objects themselves.
*
* Note that this executes just one phase of release, and so typically
- * must be called three times. We do it this way because (a) we want to
+ * must be called three times. We do it this way because (a) we want to
* do all the recursion separately for each phase, thereby preserving
* the needed order of operations; and (b) xact.c may have other operations
* to do between the phases.
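/*
 * Illustrative sketch (not part of resowner.c) of the calling pattern the
 * comment above describes: the caller drives the three phases in order.
 * RESOURCE_RELEASE_BEFORE_LOCKS appears later in this patch;
 * RESOURCE_RELEASE_LOCKS and RESOURCE_RELEASE_AFTER_LOCKS are assumed names
 * for the remaining phases.
 */
#ifdef RESOWNER_RELEASE_EXAMPLE
static void
ReleaseOwnerAllPhases(ResourceOwner owner, bool isCommit, bool isTopLevel)
{
	ResourceOwnerRelease(owner, RESOURCE_RELEASE_BEFORE_LOCKS, isCommit, isTopLevel);
	/* xact.c can run other end-of-transaction work between the phases */
	ResourceOwnerRelease(owner, RESOURCE_RELEASE_LOCKS, isCommit, isTopLevel);
	ResourceOwnerRelease(owner, RESOURCE_RELEASE_AFTER_LOCKS, isCommit, isTopLevel);
}
#endif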
save = CurrentResourceOwner;
PG_TRY();
- {
ResourceOwnerReleaseInternal(owner, phase, isCommit, isTopLevel);
- }
PG_CATCH();
{
CurrentResourceOwner = save;
{
/*
* For a top-level xact we are going to release all buffers,
- * so just do a single bufmgr call at the top of the recursion.
+ * so just do a single bufmgr call at the top of the
+ * recursion.
*/
if (owner == TopTransactionResourceOwner)
AtEOXact_Buffers(isCommit);
else
{
/*
- * Release buffers retail. Note that ReleaseBuffer will remove
- * the buffer entry from my list, so I just have to iterate till
- * there are none.
+ * Release buffers retail. Note that ReleaseBuffer will
+ * remove the buffer entry from my list, so I just have to
+ * iterate till there are none.
*
- * XXX this is fairly inefficient due to multiple BufMgrLock grabs
- * if there are lots of buffers to be released, but we don't
- * expect many (indeed none in the success case) so it's probably
- * not worth optimizing.
+ * XXX this is fairly inefficient due to multiple BufMgrLock
+ * grabs if there are lots of buffers to be released, but we
+ * don't expect many (indeed none in the success case) so it's
+ * probably not worth optimizing.
*
* We are however careful to release back-to-front, so as to
* avoid O(N^2) behavior in ResourceOwnerForgetBuffer().
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all references,
- * so just do a single relcache call at the top of the recursion.
+ * For a top-level xact we are going to release all
+ * references, so just do a single relcache call at the top of
+ * the recursion.
*/
if (owner == TopTransactionResourceOwner)
AtEOXact_RelationCache(isCommit);
{
/*
* Release relcache refs retail. Note that RelationClose will
- * remove the relref entry from my list, so I just have to iterate
- * till there are none.
+ * remove the relref entry from my list, so I just have to
+ * iterate till there are none.
*/
while (owner->nrelrefs > 0)
RelationClose(owner->relrefs[owner->nrelrefs - 1]);
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all locks (or at
- * least all non-session locks), so just do a single lmgr call
- * at the top of the recursion.
+ * For a top-level xact we are going to release all locks (or
+ * at least all non-session locks), so just do a single lmgr
+ * call at the top of the recursion.
*/
if (owner == TopTransactionResourceOwner)
ProcReleaseLocks(isCommit);
{
/*
* Release locks retail. Note that if we are committing a
- * subtransaction, we do NOT release its locks yet, but transfer
- * them to the parent.
+ * subtransaction, we do NOT release its locks yet, but
+ * transfer them to the parent.
*/
Assert(owner->parent != NULL);
if (isCommit)
if (isTopLevel)
{
/*
- * For a top-level xact we are going to release all references,
- * so just do a single catcache call at the top of the recursion.
+ * For a top-level xact we are going to release all
+ * references, so just do a single catcache call at the top of
+ * the recursion.
*/
if (owner == TopTransactionResourceOwner)
AtEOXact_CatCache(isCommit);
else
{
/*
- * Release catcache refs retail. Note that ReleaseCatCache will
- * remove the catref entry from my list, so I just have to iterate
- * till there are none. Ditto for catcache lists.
+ * Release catcache refs retail. Note that ReleaseCatCache
+ * will remove the catref entry from my list, so I just have
+ * to iterate till there are none. Ditto for catcache lists.
*/
while (owner->ncatrefs > 0)
ReleaseCatCache(owner->catrefs[owner->ncatrefs - 1]);
Assert(owner->nrelrefs == 0);
/*
- * Delete children. The recursive call will delink the child
- * from me, so just iterate as long as there is a child.
+ * Delete children. The recursive call will delink the child from me,
+ * so just iterate as long as there is a child.
*/
while (owner->firstchild != NULL)
ResourceOwnerDelete(owner->firstchild);
/*
- * We delink the owner from its parent before deleting it, so that
- * if there's an error we won't have deleted/busted owners still
- * attached to the owner tree. Better a leak than a crash.
+ * We delink the owner from its parent before deleting it, so that if
+ * there's an error we won't have deleted/busted owners still attached
+ * to the owner tree. Better a leak than a crash.
*/
ResourceOwnerNewParent(owner, NULL);
int i;
/*
- * Scan back-to-front because it's more likely we are releasing
- * a recently pinned buffer. This isn't always the case of course,
+ * Scan back-to-front because it's more likely we are releasing a
+ * recently pinned buffer. This isn't always the case of course,
* but it's the way to bet.
*/
for (i = nb1; i >= 0; i--)
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.43 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplesort.c,v 1.44 2004/08/29 05:06:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
&state->sortFnKinds[i]);
/*
- * We needn't fill in sk_strategy or sk_subtype since these scankeys
- * will never be passed to an index.
+ * We needn't fill in sk_strategy or sk_subtype since these
+ * scankeys will never be passed to an index.
*/
ScanKeyInit(&state->scanKeys[i],
attNums[i],
* Can't find a comparator, so use the operator as-is. Decide whether
* it is forward or reverse sort by looking at its name (grotty, but
* this only matters for deciding which end NULLs should get sorted
- * to). XXX possibly better idea: see whether its selectivity function
- * is scalargtcmp?
+ * to). XXX possibly better idea: see whether its selectivity
+ * function is scalargtcmp?
*/
tuple = SearchSysCache(OPEROID,
ObjectIdGetDatum(sortOperator),
/*
* If key values are equal, we sort on ItemPointer. This does not
- * affect validity of the finished index, but it offers cheap insurance
- * against performance problems with bad qsort implementations that have
- * trouble with large numbers of equal keys.
+ * affect validity of the finished index, but it offers cheap
+ * insurance against performance problems with bad qsort
+ * implementations that have trouble with large numbers of equal keys.
*/
{
BlockNumber blk1 = ItemPointerGetBlockNumber(&tuple1->t_tid);
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.19 2004/08/29 04:13:00 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/sort/tuplestore.c,v 1.20 2004/08/29 05:06:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
state->myfile = NULL;
state->memtupcount = 0;
- state->memtupsize = 1024; /* initial guess */
+ state->memtupsize = 1024; /* initial guess */
state->memtuples = (void **) palloc(state->memtupsize * sizeof(void *));
USEMEM(state, GetMemoryChunkSpace(state->memtuples));
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.76 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/utils/time/tqual.c,v 1.77 2004/08/29 05:06:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* By here, the inserting transaction has committed - have to check
* when...
*
- * Note that the provided snapshot contains only top-level XIDs, so
- * we have to convert a subxact XID to its parent for comparison.
+ * Note that the provided snapshot contains only top-level XIDs, so we
+ * have to convert a subxact XID to its parent for comparison.
* However, we can make first-pass range checks with the given XID,
* because a subxact with XID < xmin has surely also got a parent with
* XID < xmin, while one with XID >= xmax must belong to a parent that
FreeXactSnapshot(void)
{
/*
- * We do not free the xip arrays for the snapshot structs;
- * they will be reused soon. So this is now just a state
- * change to prevent outside callers from accessing the snapshots.
+ * We do not free the xip arrays for the snapshot structs; they will
+ * be reused soon. So this is now just a state change to prevent
+ * outside callers from accessing the snapshots.
*/
QuerySnapshot = NULL;
SerializableSnapshot = NULL;
* Portions Copyright (c) 1994, Regents of the University of California
* Portions taken from FreeBSD.
*
- * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.52 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/initdb/initdb.c,v 1.53 2004/08/29 05:06:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
#include
#ifdef HAVE_LANGINFO_H
-# include <langinfo.h>
+#include <langinfo.h>
#endif
#include "libpq/pqsignal.h"
/*
* these values are passed in by makefile defines
*/
-char *share_path = NULL;
+char *share_path = NULL;
/* values to be obtained from arguments */
char *pg_data = "";
char *lc_messages = "";
char *username = "";
bool pwprompt = false;
-char *pwfilename = NULL;
-char *authmethod = "";
+char *pwfilename = NULL;
+char *authmethod = "";
bool debug = false;
bool noclean = false;
bool show_setting = false;
/*
* Warning messages for authentication methods
*/
-char *authtrust_warning = \
- "# CAUTION: Configuring the system for local \"trust\" authentication allows\n"
- "# any local user to connect as any PostgreSQL user, including the database\n"
- "# superuser. If you do not trust all your local users, use another\n"
- "# authenication method.\n";
-char *authwarning = NULL;
+char *authtrust_warning = \
+"# CAUTION: Configuring the system for local \"trust\" authentication allows\n"
+"# any local user to connect as any PostgreSQL user, including the database\n"
+"# superuser. If you do not trust all your local users, use another\n"
+"# authenication method.\n";
+char *authwarning = NULL;
/*
* Centralized knowledge of switches to pass to backend
/* path to 'initdb' binary directory */
-char bin_path[MAXPGPATH];
-char backend_exec[MAXPGPATH];
+char bin_path[MAXPGPATH];
+char backend_exec[MAXPGPATH];
static void *xmalloc(size_t size);
static char *xstrdup(const char *s);
static char **replace_token(char **lines, char *token, char *replacement);
static char **readfile(char *path);
static void writefile(char *path, char **lines);
-static int mkdir_p(char *path, mode_t omode);
+static int mkdir_p(char *path, mode_t omode);
static void exit_nicely(void);
static char *get_id(void);
static char *get_encoding_id(char *encoding_name);
static char *get_short_version(void);
-static int check_data_dir(void);
+static int check_data_dir(void);
static bool mkdatadir(const char *subdir);
static void set_input(char **dest, char *filename);
static void check_input(char *path);
{
fprintf(stderr,
_("%s: cannot be run as root\n"
- "Please log in (using, e.g., \"su\") as the "
- "(unprivileged) user that will\n"
- "own the server process.\n"),
+ "Please log in (using, e.g., \"su\") as the "
+ "(unprivileged) user that will\n"
+ "own the server process.\n"),
progname);
exit(1);
}
{
if ((enc = pg_char_to_encoding(encoding_name)) >= 0 &&
pg_valid_server_encoding(encoding_name) >= 0)
- {
return encodingid_to_string(enc);
- }
}
fprintf(stderr, _("%s: \"%s\" is not a valid server encoding name\n"),
progname, encoding_name ? encoding_name : "(null)");
struct encoding_match
{
- enum pg_enc pg_enc_code;
+ enum pg_enc pg_enc_code;
char *system_enc_name;
};
struct encoding_match encoding_match_list[] = {
- { PG_EUC_JP, "EUC-JP" },
- { PG_EUC_JP, "eucJP" },
- { PG_EUC_JP, "IBM-eucJP" },
- { PG_EUC_JP, "sdeckanji" },
-
- { PG_EUC_CN, "EUC-CN" },
- { PG_EUC_CN, "eucCN" },
- { PG_EUC_CN, "IBM-eucCN" },
- { PG_EUC_CN, "GB2312" },
- { PG_EUC_CN, "dechanzi" },
-
- { PG_EUC_KR, "EUC-KR" },
- { PG_EUC_KR, "eucKR" },
- { PG_EUC_KR, "IBM-eucKR" },
- { PG_EUC_KR, "deckorean" },
- { PG_EUC_KR, "5601" },
-
- { PG_EUC_TW, "EUC-TW" },
- { PG_EUC_TW, "eucTW" },
- { PG_EUC_TW, "IBM-eucTW" },
- { PG_EUC_TW, "cns11643" },
+ {PG_EUC_JP, "EUC-JP"},
+ {PG_EUC_JP, "eucJP"},
+ {PG_EUC_JP, "IBM-eucJP"},
+ {PG_EUC_JP, "sdeckanji"},
+
+ {PG_EUC_CN, "EUC-CN"},
+ {PG_EUC_CN, "eucCN"},
+ {PG_EUC_CN, "IBM-eucCN"},
+ {PG_EUC_CN, "GB2312"},
+ {PG_EUC_CN, "dechanzi"},
+
+ {PG_EUC_KR, "EUC-KR"},
+ {PG_EUC_KR, "eucKR"},
+ {PG_EUC_KR, "IBM-eucKR"},
+ {PG_EUC_KR, "deckorean"},
+ {PG_EUC_KR, "5601"},
+
+ {PG_EUC_TW, "EUC-TW"},
+ {PG_EUC_TW, "eucTW"},
+ {PG_EUC_TW, "IBM-eucTW"},
+ {PG_EUC_TW, "cns11643"},
#ifdef NOT_VERIFIED
- { PG_JOHAB, "???" },
+ {PG_JOHAB, "???"},
#endif
- { PG_UTF8, "UTF-8" },
- { PG_UTF8, "utf8" },
+ {PG_UTF8, "UTF-8"},
+ {PG_UTF8, "utf8"},
- { PG_LATIN1, "ISO-8859-1" },
- { PG_LATIN1, "ISO8859-1" },
- { PG_LATIN1, "iso88591" },
+ {PG_LATIN1, "ISO-8859-1"},
+ {PG_LATIN1, "ISO8859-1"},
+ {PG_LATIN1, "iso88591"},
- { PG_LATIN2, "ISO-8859-2" },
- { PG_LATIN2, "ISO8859-2" },
- { PG_LATIN2, "iso88592" },
+ {PG_LATIN2, "ISO-8859-2"},
+ {PG_LATIN2, "ISO8859-2"},
+ {PG_LATIN2, "iso88592"},
- { PG_LATIN3, "ISO-8859-3" },
- { PG_LATIN3, "ISO8859-3" },
- { PG_LATIN3, "iso88593" },
+ {PG_LATIN3, "ISO-8859-3"},
+ {PG_LATIN3, "ISO8859-3"},
+ {PG_LATIN3, "iso88593"},
- { PG_LATIN4, "ISO-8859-4" },
- { PG_LATIN4, "ISO8859-4" },
- { PG_LATIN4, "iso88594" },
+ {PG_LATIN4, "ISO-8859-4"},
+ {PG_LATIN4, "ISO8859-4"},
+ {PG_LATIN4, "iso88594"},
- { PG_LATIN5, "ISO-8859-9" },
- { PG_LATIN5, "ISO8859-9" },
- { PG_LATIN5, "iso88599" },
+ {PG_LATIN5, "ISO-8859-9"},
+ {PG_LATIN5, "ISO8859-9"},
+ {PG_LATIN5, "iso88599"},
- { PG_LATIN6, "ISO-8859-10" },
- { PG_LATIN6, "ISO8859-10" },
- { PG_LATIN6, "iso885910" },
+ {PG_LATIN6, "ISO-8859-10"},
+ {PG_LATIN6, "ISO8859-10"},
+ {PG_LATIN6, "iso885910"},
- { PG_LATIN7, "ISO-8859-13" },
- { PG_LATIN7, "ISO8859-13" },
- { PG_LATIN7, "iso885913" },
+ {PG_LATIN7, "ISO-8859-13"},
+ {PG_LATIN7, "ISO8859-13"},
+ {PG_LATIN7, "iso885913"},
- { PG_LATIN8, "ISO-8859-14" },
- { PG_LATIN8, "ISO8859-14" },
- { PG_LATIN8, "iso885914" },
+ {PG_LATIN8, "ISO-8859-14"},
+ {PG_LATIN8, "ISO8859-14"},
+ {PG_LATIN8, "iso885914"},
- { PG_LATIN9, "ISO-8859-15" },
- { PG_LATIN9, "ISO8859-15" },
- { PG_LATIN9, "iso885915" },
+ {PG_LATIN9, "ISO-8859-15"},
+ {PG_LATIN9, "ISO8859-15"},
+ {PG_LATIN9, "iso885915"},
- { PG_LATIN10, "ISO-8859-16" },
- { PG_LATIN10, "ISO8859-16" },
- { PG_LATIN10, "iso885916" },
+ {PG_LATIN10, "ISO-8859-16"},
+ {PG_LATIN10, "ISO8859-16"},
+ {PG_LATIN10, "iso885916"},
- { PG_WIN1256, "CP1256" },
- { PG_TCVN, "CP1258" },
+ {PG_WIN1256, "CP1256"},
+ {PG_TCVN, "CP1258"},
#ifdef NOT_VERIFIED
- { PG_WIN874, "???" },
+ {PG_WIN874, "???"},
#endif
- { PG_KOI8R, "KOI8-R" },
- { PG_WIN1251, "CP1251" },
- { PG_ALT, "CP866" },
+ {PG_KOI8R, "KOI8-R"},
+ {PG_WIN1251, "CP1251"},
+ {PG_ALT, "CP866"},
- { PG_ISO_8859_5, "ISO-8859-5" },
- { PG_ISO_8859_5, "ISO8859-5" },
- { PG_ISO_8859_5, "iso88595" },
+ {PG_ISO_8859_5, "ISO-8859-5"},
+ {PG_ISO_8859_5, "ISO8859-5"},
+ {PG_ISO_8859_5, "iso88595"},
- { PG_ISO_8859_6, "ISO-8859-6" },
- { PG_ISO_8859_6, "ISO8859-6" },
- { PG_ISO_8859_6, "iso88596" },
+ {PG_ISO_8859_6, "ISO-8859-6"},
+ {PG_ISO_8859_6, "ISO8859-6"},
+ {PG_ISO_8859_6, "iso88596"},
- { PG_ISO_8859_7, "ISO-8859-7" },
- { PG_ISO_8859_7, "ISO8859-7" },
- { PG_ISO_8859_7, "iso88597" },
+ {PG_ISO_8859_7, "ISO-8859-7"},
+ {PG_ISO_8859_7, "ISO8859-7"},
+ {PG_ISO_8859_7, "iso88597"},
- { PG_ISO_8859_8, "ISO-8859-8" },
- { PG_ISO_8859_8, "ISO8859-8" },
- { PG_ISO_8859_8, "iso88598" },
+ {PG_ISO_8859_8, "ISO-8859-8"},
+ {PG_ISO_8859_8, "ISO8859-8"},
+ {PG_ISO_8859_8, "iso88598"},
- { PG_SQL_ASCII, NULL } /* end marker */
+ {PG_SQL_ASCII, NULL} /* end marker */
};
static char *
static void
check_encodings_match(int pg_enc, const char *ctype)
{
- char *sys;
- int i;
+ char *sys;
+ int i;
sys = get_encoding_from_locale(ctype);
for (i = 0; encoding_match_list[i].system_enc_name; i++)
{
if (pg_enc == encoding_match_list[i].pg_enc_code
- && strcasecmp(sys, encoding_match_list[i].system_enc_name) == 0)
+ && strcasecmp(sys, encoding_match_list[i].system_enc_name) == 0)
{
free(sys);
return;
_("%s: warning: encoding mismatch\n"), progname);
fprintf(stderr,
_("The encoding you selected (%s) and the encoding that the selected\n"
- "locale uses (%s) are not known to match. This may lead to\n"
+ "locale uses (%s) are not known to match. This may lead to\n"
"misbehavior in various character string processing functions. To fix\n"
- "this situation, rerun %s and either do not specify an encoding\n"
+ "this situation, rerun %s and either do not specify an encoding\n"
"explicitly, or choose a matching combination.\n"),
pg_encoding_to_char(pg_enc), sys, progname);
static int
find_matching_encoding(const char *ctype)
{
- char *sys;
- int i;
+ char *sys;
+ int i;
sys = get_encoding_from_locale(ctype);
free(sys);
return -1;
}
-#endif /* HAVE_LANGINFO_H && CODESET */
+#endif /* HAVE_LANGINFO_H && CODESET */
/*
* get short version of VERSION
{
fprintf(stderr,
_("%s: file \"%s\" does not exist\n"
- "This means you have a corrupted installation or identified\n"
+ "This means you have a corrupted installation or identified\n"
"the wrong directory with the invocation option -L.\n"),
progname, path);
exit(1);
{
char cmd[MAXPGPATH];
static const int bufs[] = {1000, 900, 800, 700, 600, 500,
- 400, 300, 200, 100, 50};
+ 400, 300, 200, 100, 50};
static const int len = sizeof(bufs) / sizeof(int);
int i,
status;
"host all all ::1",
"#host all all ::1");
#endif
-
+
/* Replace default authentication methods */
conflines = replace_token(conflines,
- "@authmethod@",
+ "@authmethod@",
authmethod);
-
+
conflines = replace_token(conflines,
"@authcomment@",
- strcmp(authmethod,"trust") ? "" : authtrust_warning);
-
+ strcmp(authmethod, "trust") ? "" : authtrust_warning);
+
snprintf(path, sizeof(path), "%s/pg_hba.conf", pg_data);
writefile(path, conflines);
if (strcmp(headerline, *bki_lines) != 0)
{
fprintf(stderr,
- _("%s: input file \"%s\" does not belong to PostgreSQL %s\n"
- "Check your installation or specify the correct path "
- "using the option -L.\n"),
+ _("%s: input file \"%s\" does not belong to PostgreSQL %s\n"
+ "Check your installation or specify the correct path "
+ "using the option -L.\n"),
progname, bki_file, PG_VERSION);
exit_nicely();
/*
* Pass correct LC_xxx environment to bootstrap.
*
- * The shell script arranged to restore the LC settings afterwards,
- * but there doesn't seem to be any compelling reason to do that.
+ * The shell script arranged to restore the LC settings afterwards, but
+ * there doesn't seem to be any compelling reason to do that.
*/
snprintf(cmd, sizeof(cmd), "LC_COLLATE=%s", lc_collate);
putenv(xstrdup(cmd));
* Read password from file
*
* Ideally this should insist that the file not be world-readable.
- * However, this option is mainly intended for use on Windows where
- * file permissions may not exist at all, so we'll skip the paranoia
- * for now.
+ * However, this option is mainly intended for use on Windows
+ * where file permissions may not exist at all, so we'll skip the
+ * paranoia for now.
*/
- FILE *pwf = fopen(pwfilename,"r");
- char pwdbuf[MAXPGPATH];
- int i;
+ FILE *pwf = fopen(pwfilename, "r");
+ char pwdbuf[MAXPGPATH];
+ int i;
if (!pwf)
{
fclose(pwf);
i = strlen(pwdbuf);
- while (i > 0 && (pwdbuf[i-1] == '\r' || pwdbuf[i-1] == '\n'))
+ while (i > 0 && (pwdbuf[i - 1] == '\r' || pwdbuf[i - 1] == '\n'))
pwdbuf[--i] = '\0';
-
+
pwd1 = xstrdup(pwdbuf);
-
+
}
printf(_("setting password ... "));
fflush(stdout);
PG_CMD_OPEN;
if (fprintf(pg,
- "ALTER USER \"%s\" WITH PASSWORD '%s';\n", effective_user, pwd1) < 0)
+ "ALTER USER \"%s\" WITH PASSWORD '%s';\n", effective_user, pwd1) < 0)
{
/* write failure */
exit_nicely();
{
fprintf(stderr,
_("%s: The password file was not generated. "
- "Please report this problem.\n"),
+ "Please report this problem.\n"),
progname);
exit_nicely();
}
lines = readfile(info_schema_file);
/*
- * We use -N here to avoid backslashing stuff in information_schema.sql
+ * We use -N here to avoid backslashing stuff in
+ * information_schema.sql
*/
snprintf(cmd, sizeof(cmd),
"\"%s\" %s -N template1 >%s",
{"lc-time", required_argument, NULL, 6},
{"lc-messages", required_argument, NULL, 7},
{"no-locale", no_argument, NULL, 8},
- {"auth", required_argument, NULL, 'A'},
+ {"auth", required_argument, NULL, 'A'},
{"pwprompt", no_argument, NULL, 'W'},
{"pwfile", required_argument, NULL, 9},
{"username", required_argument, NULL, 'U'},
progname = get_progname(argv[0]);
set_pglocale_pgservice(argv[0], "initdb");
- if (argc > 1)
- {
- if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
- {
- usage(progname);
- exit(0);
- }
- if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
- {
- puts("initdb (PostgreSQL) " PG_VERSION);
- exit(0);
- }
- }
+ if (argc > 1)
+ {
+ if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-?") == 0)
+ {
+ usage(progname);
+ exit(0);
+ }
+ if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
+ {
+ puts("initdb (PostgreSQL) " PG_VERSION);
+ exit(0);
+ }
+ }
/* process command-line options */
break;
case 'd':
debug = true;
- printf(_("Running in debug mode.\n"));
+ printf(_("Running in debug mode.\n"));
break;
case 'n':
noclean = true;
break;
default:
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
- progname);
+ progname);
exit(1);
}
}
if (optind < argc)
{
fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
- progname, argv[optind + 1]);
+ progname, argv[optind + 1]);
fprintf(stderr, _("Try \"%s --help\" for more information.\n"),
progname);
}
fprintf(stderr, _("%s: you cannot specify both password prompt and password file\n"), progname);
exit(1);
}
-
+
if (authmethod == NULL || !strlen(authmethod))
{
authwarning = _("\nWARNING: enabling \"trust\" authentication for local connections.\n"
"You can change this by editing pg_hba.conf or using the -A flag the\n"
"next time you run initdb.\n");
- authmethod="trust";
+ authmethod = "trust";
}
- if (strcmp(authmethod,"md5") &&
- strcmp(authmethod,"ident") &&
- strncmp(authmethod,"ident ",6) && /* ident with space = param */
- strcmp(authmethod,"trust") &&
+ if (strcmp(authmethod, "md5") &&
+ strcmp(authmethod, "ident") &&
+ strncmp(authmethod, "ident ", 6) && /* ident with space =
+ * param */
+ strcmp(authmethod, "trust") &&
#ifdef USE_PAM
- strcmp(authmethod,"pam") &&
- strncmp(authmethod,"pam ",4) && /* pam with space = param */
+ strcmp(authmethod, "pam") &&
+ strncmp(authmethod, "pam ", 4) && /* pam with space = param */
#endif
- strcmp(authmethod,"crypt") &&
- strcmp(authmethod,"password")
+ strcmp(authmethod, "crypt") &&
+ strcmp(authmethod, "password")
)
+
/*
- * Kerberos methods not listed because they are not supported
- * over local connections and are rejected in hba.c
+ * Kerberos methods not listed because they are not supported over
+ * local connections and are rejected in hba.c
*/
{
fprintf(stderr, _("%s: unknown authentication method \"%s\".\n"), progname, authmethod);
exit(1);
}
- if ((!strcmp(authmethod,"md5") ||
- !strcmp(authmethod,"crypt") ||
- !strcmp(authmethod,"password")) &&
- !(pwprompt || pwfilename))
+ if ((!strcmp(authmethod, "md5") ||
+ !strcmp(authmethod, "crypt") ||
+ !strcmp(authmethod, "password")) &&
+ !(pwprompt || pwfilename))
{
fprintf(stderr, _("%s: you need to specify a password for the superuser to enable %s authentication.\n"), progname, authmethod);
exit(1);
/*
* we have to set PGDATA for postgres rather than pass it on the
- * command line to avoid dumb quoting problems on Windows, and we would
- * especially need quotes otherwise on Windows because paths there are
- * most likely to have embedded spaces.
+ * command line to avoid dumb quoting problems on Windows, and we
+ * would especially need quotes otherwise on Windows because paths
+ * there are most likely to have embedded spaces.
*/
pgdenv = xmalloc(8 + strlen(pg_data));
sprintf(pgdenv, "PGDATA=%s", pg_data);
{
if (ret == -1)
fprintf(stderr,
- _("The program \"postgres\" is needed by %s "
- "but was not found in the same directory as \"%s\".\n"
- "Check your installation.\n"),
- progname, progname);
+ _("The program \"postgres\" is needed by %s "
+ "but was not found in the same directory as \"%s\".\n"
+ "Check your installation.\n"),
+ progname, progname);
else
fprintf(stderr,
- _("The program \"postgres\" was found by %s "
- "but was not the same version as \"%s\".\n"
- "Check your installation.\n"),
- progname, progname);
+ _("The program \"postgres\" was found by %s "
+ "but was not the same version as \"%s\".\n"
+ "Check your installation.\n"),
+ progname, progname);
exit(1);
}
fprintf(stderr, _("%s: input file location must be an absolute path\n"), progname);
exit(1);
}
-
+
canonicalize_path(share_path);
if ((short_version = get_short_version()) == NULL)
setlocales();
printf(_("The files belonging to this database system will be owned "
- "by user \"%s\".\n"
- "This user must also own the server process.\n\n"),
+ "by user \"%s\".\n"
+ "This user must also own the server process.\n\n"),
effective_user);
if (strcmp(lc_ctype, lc_collate) == 0 &&
strcmp(lc_ctype, lc_numeric) == 0 &&
strcmp(lc_ctype, lc_monetary) == 0 &&
strcmp(lc_ctype, lc_messages) == 0)
- {
printf(_("The database cluster will be initialized with locale %s.\n"), lc_ctype);
- }
else
{
printf(_("The database cluster will be initialized with locales\n"
- " COLLATE: %s\n"
- " CTYPE: %s\n"
- " MESSAGES: %s\n"
- " MONETARY: %s\n"
- " NUMERIC: %s\n"
- " TIME: %s\n"),
+ " COLLATE: %s\n"
+ " CTYPE: %s\n"
+ " MESSAGES: %s\n"
+ " MONETARY: %s\n"
+ " NUMERIC: %s\n"
+ " TIME: %s\n"),
lc_collate,
lc_ctype,
lc_messages,
{
if (strlen(encoding) == 0)
{
- int tmp;
+ int tmp;
+
tmp = find_matching_encoding(lc_ctype);
if (tmp == -1)
{
else
{
encodingid = encodingid_to_string(tmp);
- printf(_("The default database encoding has accordingly been set to %s.\n"),
+ printf(_("The default database encoding has accordingly been set to %s.\n"),
pg_encoding_to_char(tmp));
}
}
else
check_encodings_match(atoi(encodingid), lc_ctype);
}
-#endif /* HAVE_LANGINFO_H && CODESET */
+#endif /* HAVE_LANGINFO_H && CODESET */
printf("\n");
/* Present and not empty */
fprintf(stderr,
_("%s: directory \"%s\" exists but is not empty\n"
- "If you want to create a new database system, either remove or empty\n"
- "the directory \"%s\" or run %s\n"
- "with an argument other than \"%s\".\n"),
+ "If you want to create a new database system, either remove or empty\n"
+ "the directory \"%s\" or run %s\n"
+ "with an argument other than \"%s\".\n"),
progname, pg_data, pg_data, progname, pg_data);
exit(1); /* no further message needed */
/*
* Determine platform-specific config settings
*
- * Use reasonable values if kernel will let us, else scale back. Probe for
- * max_connections first since it is subject to more constraints than
- * shared_buffers.
+ * Use reasonable values if kernel will let us, else scale back. Probe
+ * for max_connections first since it is subject to more constraints
+ * than shared_buffers.
*/
set_null_conf();
/* Bootstrap template1 */
bootstrap_template1(short_version);
- /* Make the per-database PG_VERSION for template1 only after init'ing it */
+ /*
+ * Make the per-database PG_VERSION for template1 only after init'ing
+ * it
+ */
set_short_version(short_version, "base/1");
/* Create the stuff we don't need to use bootstrap mode for */
fprintf(stderr, authwarning);
printf(_("\nSuccess. You can now start the database server using:\n\n"
- " %s%s%s/postmaster -D %s%s%s\n"
- "or\n"
- " %s%s%s/pg_ctl -D %s%s%s -l logfile start\n\n"),
- QUOTE_PATH, bin_path, QUOTE_PATH, QUOTE_PATH, pg_data, QUOTE_PATH,
- QUOTE_PATH, bin_path, QUOTE_PATH, QUOTE_PATH, pg_data, QUOTE_PATH);
+ " %s%s%s/postmaster -D %s%s%s\n"
+ "or\n"
+ " %s%s%s/pg_ctl -D %s%s%s -l logfile start\n\n"),
+ QUOTE_PATH, bin_path, QUOTE_PATH, QUOTE_PATH, pg_data, QUOTE_PATH,
+ QUOTE_PATH, bin_path, QUOTE_PATH, QUOTE_PATH, pg_data, QUOTE_PATH);
return 0;
}
 * accommodate the possibility that the installation has been relocated from
* the place originally configured.
*
*
* This code is released under the terms of the PostgreSQL License.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.5 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_config/pg_config.c,v 1.6 2004/08/29 05:06:52 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static void
help()
{
- printf(_("\n%s provides information about the installed version of PostgreSQL.\n\n"),progname);
+ printf(_("\n%s provides information about the installed version of PostgreSQL.\n\n"), progname);
printf(_("Usage:\n"));
- printf(_(" %s OPTION...\n\n"),progname);
+ printf(_(" %s OPTION...\n\n"), progname);
printf(_("Options:\n"));
printf(_(" --bindir show location of user executables\n"));
printf(_(" --includedir show location of C header files of the client\n"));
static void
advice()
{
- fprintf(stderr,_("\nTry \"%s --help\" for more information\n"),progname);
+ fprintf(stderr, _("\nTry \"%s --help\" for more information\n"), progname);
}
int
-main (int argc, char ** argv)
+main(int argc, char **argv)
{
- int i;
- int ret;
- char mypath[MAXPGPATH];
- char otherpath[MAXPGPATH];
+ int i;
+ int ret;
+ char mypath[MAXPGPATH];
+ char otherpath[MAXPGPATH];
- progname = (char *)get_progname(argv[0]);
+ progname = (char *) get_progname(argv[0]);
if (argc < 2)
{
- fprintf(stderr,_("%s: argument required\n"),progname);
+ fprintf(stderr, _("%s: argument required\n"), progname);
advice();
exit(1);
}
- for (i=1; i < argc; i++)
+ for (i = 1; i < argc; i++)
{
- if (strcmp(argv[i],"--bindir") == 0 ||
- strcmp(argv[i],"--includedir") == 0 ||
- strcmp(argv[i],"--includedir-server") == 0 ||
- strcmp(argv[i],"--libdir") == 0 ||
- strcmp(argv[i],"--pkglibdir") == 0 ||
- strcmp(argv[i],"--pgxs") == 0 ||
- strcmp(argv[i],"--configure") == 0)
+ if (strcmp(argv[i], "--bindir") == 0 ||
+ strcmp(argv[i], "--includedir") == 0 ||
+ strcmp(argv[i], "--includedir-server") == 0 ||
+ strcmp(argv[i], "--libdir") == 0 ||
+ strcmp(argv[i], "--pkglibdir") == 0 ||
+ strcmp(argv[i], "--pgxs") == 0 ||
+ strcmp(argv[i], "--configure") == 0)
{
/* come back to these later */
- continue;
+ continue;
}
- if (strcmp(argv[i],"--version") == 0)
+ if (strcmp(argv[i], "--version") == 0)
{
printf("PostgreSQL " PG_VERSION "\n");
exit(0);
}
- if (strcmp(argv[i],"--help") == 0 || strcmp(argv[i],"-?") == 0)
+ if (strcmp(argv[i], "--help") == 0 || strcmp(argv[i], "-?") == 0)
{
help();
exit(0);
}
- fprintf(stderr,_("%s: invalid argument: %s\n"),progname,argv[i]);
+ fprintf(stderr, _("%s: invalid argument: %s\n"), progname, argv[i]);
advice();
exit(1);
}
- ret = find_my_exec(argv[0],mypath);
+ ret = find_my_exec(argv[0], mypath);
if (ret)
{
- fprintf(stderr,"%s: could not locate my own executable\n",progname);
+ fprintf(stderr, "%s: could not locate my own executable\n", progname);
exit(1);
}
- for (i=1; i < argc; i++)
+ for (i = 1; i < argc; i++)
{
- if (strcmp(argv[i],"--configure") == 0)
+ if (strcmp(argv[i], "--configure") == 0)
{
/* the VAL_CONFIGURE macro must be defined by the Makefile */
- printf("%s\n",VAL_CONFIGURE);
+ printf("%s\n", VAL_CONFIGURE);
continue;
}
- if (strcmp(argv[i],"--bindir") == 0)
+ if (strcmp(argv[i], "--bindir") == 0)
{
/* assume we are located in the bindir */
- char *lastsep;
- strcpy(otherpath,mypath);
- lastsep = strrchr(otherpath,'/');
+ char *lastsep;
+
+ strcpy(otherpath, mypath);
+ lastsep = strrchr(otherpath, '/');
if (lastsep)
*lastsep = '\0';
}
- else if (strcmp(argv[i],"--includedir") == 0)
- get_include_path(mypath,otherpath);
- else if (strcmp(argv[i],"--includedir-server") ==0)
- get_includeserver_path(mypath,otherpath);
- else if (strcmp(argv[i],"--libdir") == 0)
- get_lib_path(mypath,otherpath);
- else if (strcmp(argv[i],"--pkglibdir") == 0)
- get_pkglib_path(mypath,otherpath);
- else if (strcmp(argv[i],"--pgxs") == 0)
+ else if (strcmp(argv[i], "--includedir") == 0)
+ get_include_path(mypath, otherpath);
+ else if (strcmp(argv[i], "--includedir-server") == 0)
+ get_includeserver_path(mypath, otherpath);
+ else if (strcmp(argv[i], "--libdir") == 0)
+ get_lib_path(mypath, otherpath);
+ else if (strcmp(argv[i], "--pkglibdir") == 0)
+ get_pkglib_path(mypath, otherpath);
+ else if (strcmp(argv[i], "--pgxs") == 0)
{
- get_pkglib_path(mypath,otherpath);
- strncat(otherpath, "/pgxs/src/makefiles/pgxs.mk", MAXPGPATH-1);
+ get_pkglib_path(mypath, otherpath);
+ strncat(otherpath, "/pgxs/src/makefiles/pgxs.mk", MAXPGPATH - 1);
}
- printf("%s\n",otherpath);
+ printf("%s\n", otherpath);
}
return 0;
 * copyright (c) Oliver Elphick, 2001;
* licence: BSD
*
- * $PostgreSQL: pgsql/src/bin/pg_controldata/pg_controldata.c,v 1.18 2004/07/21 22:31:23 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_controldata/pg_controldata.c,v 1.19 2004/08/29 05:06:53 momjian Exp $
*/
#include "postgres.h"
localtime(&(ControlFile.time)));
strftime(ckpttime_str, sizeof(ckpttime_str), strftime_fmt,
localtime(&(ControlFile.checkPointCopy.time)));
+
/*
- * Format system_identifier separately to keep platform-dependent format
- * code out of the translatable message string.
+ * Format system_identifier separately to keep platform-dependent
+ * format code out of the translatable message string.
*/
snprintf(sysident_str, sizeof(sysident_str), UINT64_FORMAT,
ControlFile.system_identifier);
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.29 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_ctl/pg_ctl.c,v 1.30 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define _(x) gettext((x))
-#define WHITESPACE "\f\n\r\t\v" /* as defined by isspace() */
+#define WHITESPACE "\f\n\r\t\v" /* as defined by isspace() */
/* postmaster version ident string */
#define PM_VERSIONSTR "postmaster (PostgreSQL) " PG_VERSION "\n"
SMART_MODE,
FAST_MODE,
IMMEDIATE_MODE
-} ShutdownMode;
+} ShutdownMode;
typedef enum
REGISTER_COMMAND,
UNREGISTER_COMMAND,
RUN_AS_SERVICE_COMMAND
-} CtlCommand;
+} CtlCommand;
static bool do_wait = false;
static int wait_seconds = 60;
static bool silence_echo = false;
static ShutdownMode shutdown_mode = SMART_MODE;
-static int sig = SIGTERM; /* default */
+static int sig = SIGTERM; /* default */
static CtlCommand ctl_command = NO_COMMAND;
static char *pg_data = NULL;
static char *pgdata_opt = NULL;
static const char *progname;
static char *log_file = NULL;
static char *postgres_path = NULL;
-static char *register_servicename = "PostgreSQL"; /* FIXME: + version ID? */
+static char *register_servicename = "PostgreSQL"; /* FIXME: + version ID? */
static char *register_username = NULL;
static char *register_password = NULL;
static char *argv0 = NULL;
-static void write_stderr(const char *fmt,...)
+static void
+write_stderr(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
__attribute__((format(printf, 1, 2)));
static void do_reload(void);
static void do_status(void);
static void do_kill(pgpid_t pid);
+
#ifdef WIN32
-static bool pgwin32_IsInstalled(SC_HANDLE);
-static char* pgwin32_CommandLine(bool);
+static bool pgwin32_IsInstalled(SC_HANDLE);
+static char *pgwin32_CommandLine(bool);
static void pgwin32_doRegister();
static void pgwin32_doUnregister();
static void pgwin32_SetServiceStatus(DWORD);
static void WINAPI pgwin32_ServiceHandler(DWORD);
-static void WINAPI pgwin32_ServiceMain(DWORD, LPTSTR*);
+static void WINAPI pgwin32_ServiceMain(DWORD, LPTSTR *);
static void pgwin32_doRunAsService();
#endif
static pgpid_t get_pgpid(void);
static char **readfile(char *path);
-static int start_postmaster(void);
+static int start_postmaster(void);
static bool test_postmaster_connection(void);
static char def_postopts_file[MAXPGPATH];
{
static HANDLE evtHandle = INVALID_HANDLE_VALUE;
- if (evtHandle == INVALID_HANDLE_VALUE) {
- evtHandle = RegisterEventSource(NULL,"PostgreSQL");
- if (evtHandle == NULL) {
+ if (evtHandle == INVALID_HANDLE_VALUE)
+ {
+ evtHandle = RegisterEventSource(NULL, "PostgreSQL");
+ if (evtHandle == NULL)
+ {
evtHandle = INVALID_HANDLE_VALUE;
return;
}
ReportEvent(evtHandle,
level,
0,
- 0, /* All events are Id 0 */
+ 0, /* All events are Id 0 */
NULL,
1,
0,
static void
write_stderr(const char *fmt,...)
{
- va_list ap;
+ va_list ap;
va_start(ap, fmt);
#ifndef WIN32
/* On Unix, we just fprintf to stderr */
vfprintf(stderr, fmt, ap);
#else
- /* On Win32, we print to stderr if running on a console, or write to
- * eventlog if running as a service */
- if (!isatty(fileno(stderr))) /* Running as a service */
+
+ /*
+ * On Win32, we print to stderr if running on a console, or write to
+ * eventlog if running as a service
+ */
+ if (!isatty(fileno(stderr))) /* Running as a service */
{
- char errbuf[2048]; /* Arbitrary size? */
+ char errbuf[2048]; /* Arbitrary size? */
vsnprintf(errbuf, sizeof(errbuf), fmt, ap);
write_eventlog(EVENTLOG_ERROR_TYPE, errbuf);
}
- else /* Not running as service, write to stderr */
+ else
+/* Not running as service, write to stderr */
vfprintf(stderr, fmt, ap);
#endif
va_end(ap);
start_postmaster(void)
{
/*
- * Since there might be quotes to handle here, it is easier simply
- * to pass everything to a shell to process them.
+ * Since there might be quotes to handle here, it is easier simply to
+ * pass everything to a shell to process them.
*/
char cmd[MAXPGPATH];
-
+
/*
- * Win32 needs START /B rather than "&".
+ * Win32 needs START /B rather than "&".
*
- * Win32 has a problem with START and quoted executable names.
- * You must add a "" as the title at the beginning so you can quote
- * the executable name:
- * http://www.winnetmag.com/Article/ArticleID/14589/14589.html
- * http://dev.remotenetworktechnology.com/cmd/cmdfaq.htm
+ * Win32 has a problem with START and quoted executable names. You must
+ * add a "" as the title at the beginning so you can quote the
+ * executable name:
+ * http://www.winnetmag.com/Article/ArticleID/14589/14589.html
+ * http://dev.remotenetworktechnology.com/cmd/cmdfaq.htm
*/
if (log_file != NULL)
#ifndef WIN32
#else
snprintf(cmd, MAXPGPATH, "%sSTART /B \"\" \"%s\" %s%s < \"%s\" >> \"%s\" 2>&1%s",
#endif
- SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
- DEVNULL, log_file, SYSTEMQUOTE);
+ SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
+ DEVNULL, log_file, SYSTEMQUOTE);
else
#ifndef WIN32
snprintf(cmd, MAXPGPATH, "%s\"%s\" %s%s < \"%s\" 2>&1 &%s",
#else
snprintf(cmd, MAXPGPATH, "%sSTART /B \"\" \"%s\" %s%s < \"%s\" 2>&1%s",
#endif
- SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
- DEVNULL, SYSTEMQUOTE);
+ SYSTEMQUOTE, postgres_path, pgdata_opt, post_opts,
+ DEVNULL, SYSTEMQUOTE);
return system(cmd);
}
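
Editor's note: the Win32 comment in start_postmaster() above describes the START /B quirk: a quoted executable path is only accepted if an empty "" title argument precedes it. The following sketch is not pg_ctl code (paths and options are made up); it only shows the shape of such a command string:

#include <stdio.h>

int
main(void)
{
	char		cmd[1024];
	const char *exe = "C:/Program Files/PostgreSQL/bin/postmaster.exe";	/* invented */
	const char *opts = "-D \"C:/pg data\"";		/* invented */
	const char *logfile = "C:/pg data/logfile";	/* invented */

	/* empty "" title, then the quoted executable, options, redirection */
	snprintf(cmd, sizeof(cmd),
			 "START /B \"\" \"%s\" %s >> \"%s\" 2>&1",
			 exe, opts, logfile);
	printf("would run: %s\n", cmd);
	return 0;
}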
bool success = false;
int i;
char portstr[32];
- char *p;
+ char *p;
*portstr = '\0';
/* advance past whitespace/quoting */
while (isspace(*p) || *p == '\'' || *p == '"')
p++;
-
+
if (strncmp(p, "-p", strlen("-p")) == 0)
{
p += strlen("-p");
/* advance past whitespace/quoting */
while (isspace(*p) || *p == '\'' || *p == '"')
p++;
- StrNCpy(portstr, p, Min(strcspn(p, "\"'"WHITESPACE) + 1,
+ StrNCpy(portstr, p, Min(strcspn(p, "\"'" WHITESPACE) + 1,
sizeof(portstr)));
/* keep looking, maybe there is another -p */
}
optlines = readfile(conf_file);
if (optlines != NULL)
{
- for (;*optlines != NULL; optlines++)
+ for (; *optlines != NULL; optlines++)
{
p = *optlines;
p++;
while (isspace(*p))
p++;
- StrNCpy(portstr, p, Min(strcspn(p, "#"WHITESPACE) + 1,
+ StrNCpy(portstr, p, Min(strcspn(p, "#" WHITESPACE) + 1,
sizeof(portstr)));
/* keep looking, maybe there is another */
}
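
Editor's note: both port-scanning loops above cut the next token out of an option line with strcspn() against a set of terminator characters. A minimal self-contained version of that idiom, with an invented input string and its own separator set (not pg_ctl's WHITESPACE macro), might look like this:

#include <stdio.h>
#include <string.h>

#define SEPARATORS " \t\n\r"			/* illustrative, not pg_ctl's macro */

int
main(void)
{
	const char *p = "5433' -c log_min_messages=debug";		/* made-up option text */
	char		portstr[32];
	size_t		len = strcspn(p, "\"'" SEPARATORS);

	if (len >= sizeof(portstr))
		len = sizeof(portstr) - 1;
	memcpy(portstr, p, len);
	portstr[len] = '\0';
	printf("port = %s\n", portstr);		/* prints: port = 5433 */
	return 0;
}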
for (i = 0; i < wait_seconds; i++)
{
if ((conn = PQsetdbLogin(NULL, portstr, NULL, NULL,
- "template1", NULL, NULL)) != NULL &&
+ "template1", NULL, NULL)) != NULL &&
PQstatus(conn) == CONNECTION_OK)
{
PQfinish(conn);
else if (optlines[0] == NULL || optlines[1] != NULL)
{
write_stderr(_("%s: option file %s must have exactly 1 line\n"),
- progname, ctl_command == RESTART_COMMAND ?
- postopts_file : def_postopts_file);
+ progname, ctl_command == RESTART_COMMAND ?
+ postopts_file : def_postopts_file);
exit(1);
}
else
optline = optlines[0];
len = strcspn(optline, "\r\n");
optline[len] = '\0';
-
+
if (ctl_command == RESTART_COMMAND)
{
char *arg1;
-
+
arg1 = strchr(optline, '\'');
if (arg1 == NULL || arg1 == optline)
post_opts = "";
else
{
- *(arg1 - 1) = '\0'; /* this should be a space */
+ *(arg1 - 1) = '\0'; /* this should be a space */
post_opts = arg1;
}
if (postgres_path != NULL)
/* No -D or -D already added during server start */
if (ctl_command == RESTART_COMMAND || pgdata_opt == NULL)
- pgdata_opt = "";
-
+ pgdata_opt = "";
+
if (postgres_path == NULL)
{
char *postmaster_path;
{
if (ret == -1)
write_stderr(_("The program \"postmaster\" is needed by %s "
- "but was not found in the same directory as "
+ "but was not found in the same directory as "
"\"%s\".\n"
"Check your installation.\n"),
progname, progname);
if (kill((pid_t) pid, sig) != 0)
{
write_stderr(_("stop signal failed (PID: %ld): %s\n"), pid,
- strerror(errno));
+ strerror(errno));
exit(1);
}
printf(_("waiting for postmaster to shut down..."));
fflush(stdout);
}
-
+
for (cnt = 0; cnt < wait_seconds; cnt++)
{
if ((pid = get_pgpid()) != 0)
printf(".");
fflush(stdout);
}
- pg_usleep(1000000); /* 1 sec */
+ pg_usleep(1000000); /* 1 sec */
}
else
break;
}
-
- if (pid != 0) /* pid file still exists */
+
+ if (pid != 0) /* pid file still exists */
{
if (!silence_echo)
printf(_(" failed\n"));
-
+
write_stderr(_("%s: postmaster does not shut down\n"), progname);
exit(1);
}
if (kill((pid_t) pid, sig) != 0)
{
write_stderr(_("stop signal failed (PID: %ld): %s\n"), pid,
- strerror(errno));
+ strerror(errno));
exit(1);
}
if (kill((pid_t) pid, sig) != 0)
{
write_stderr(_("reload signal failed (PID: %ld): %s\n"), pid,
- strerror(errno));
+ strerror(errno));
exit(1);
}
pid = -pid;
fprintf(stdout, _("%s: a standalone backend \"postgres\" is running (PID: %ld)\n"), progname, pid);
}
- else /* postmaster */
+ else
+/* postmaster */
{
char **optlines;
if (kill((pid_t) pid, sig) != 0)
{
write_stderr(_("signal %d failed (PID: %ld): %s\n"), sig, pid,
- strerror(errno));
+ strerror(errno));
exit(1);
}
}
#ifdef WIN32
-static bool pgwin32_IsInstalled(SC_HANDLE hSCM)
+static bool
+pgwin32_IsInstalled(SC_HANDLE hSCM)
{
- SC_HANDLE hService = OpenService(hSCM, register_servicename, SERVICE_QUERY_CONFIG);
- bool bResult = (hService != NULL);
+ SC_HANDLE hService = OpenService(hSCM, register_servicename, SERVICE_QUERY_CONFIG);
+ bool bResult = (hService != NULL);
+
if (bResult)
CloseServiceHandle(hService);
return bResult;
}
-static char* pgwin32_CommandLine(bool registration)
+static char *
+pgwin32_CommandLine(bool registration)
{
static char cmdLine[MAXPGPATH];
- int ret;
+ int ret;
+
if (registration)
ret = find_my_exec(argv0, cmdLine);
else
if (registration)
{
- if (strcasecmp(cmdLine+strlen(cmdLine)-4,".exe"))
+ if (strcasecmp(cmdLine + strlen(cmdLine) - 4, ".exe"))
{
/* If commandline does not end in .exe, append it */
- strcat(cmdLine,".exe");
+ strcat(cmdLine, ".exe");
}
- strcat(cmdLine," runservice -N \"");
- strcat(cmdLine,register_servicename);
- strcat(cmdLine,"\"");
+ strcat(cmdLine, " runservice -N \"");
+ strcat(cmdLine, register_servicename);
+ strcat(cmdLine, "\"");
}
if (pg_data)
{
- strcat(cmdLine," -D \"");
- strcat(cmdLine,pg_data);
- strcat(cmdLine,"\"");
+ strcat(cmdLine, " -D \"");
+ strcat(cmdLine, pg_data);
+ strcat(cmdLine, "\"");
}
if (do_wait)
strcat(cmdLine, " -w");
-
+
if (post_opts)
{
- strcat(cmdLine," ");
+ strcat(cmdLine, " ");
if (registration)
- strcat(cmdLine," -o \"");
- strcat(cmdLine,post_opts);
+ strcat(cmdLine, " -o \"");
+ strcat(cmdLine, post_opts);
if (registration)
- strcat(cmdLine,"\"");
+ strcat(cmdLine, "\"");
}
return cmdLine;
static void
pgwin32_doRegister()
{
- SC_HANDLE hService;
- SC_HANDLE hSCM = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
+ SC_HANDLE hService;
+ SC_HANDLE hSCM = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
+
if (hSCM == NULL)
{
write_stderr(_("Unable to open service manager\n"));
if (pgwin32_IsInstalled(hSCM))
{
CloseServiceHandle(hSCM);
- write_stderr(_("Service \"%s\" already registered\n"),register_servicename);
+ write_stderr(_("Service \"%s\" already registered\n"), register_servicename);
exit(1);
}
if ((hService = CreateService(hSCM, register_servicename, register_servicename,
- SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
- SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
+ SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS,
+ SERVICE_AUTO_START, SERVICE_ERROR_NORMAL,
pgwin32_CommandLine(true),
- NULL, NULL, "RPCSS\0", register_username, register_password)) == NULL)
+ NULL, NULL, "RPCSS\0", register_username, register_password)) == NULL)
{
CloseServiceHandle(hSCM);
- write_stderr(_("Unable to register service \"%s\" [%d]\n"), register_servicename, (int)GetLastError());
+ write_stderr(_("Unable to register service \"%s\" [%d]\n"), register_servicename, (int) GetLastError());
exit(1);
}
CloseServiceHandle(hService);
static void
pgwin32_doUnregister()
{
- SC_HANDLE hService;
- SC_HANDLE hSCM = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
+ SC_HANDLE hService;
+ SC_HANDLE hSCM = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS);
+
if (hSCM == NULL)
{
write_stderr(_("Unable to open service manager\n"));
if (!pgwin32_IsInstalled(hSCM))
{
CloseServiceHandle(hSCM);
- write_stderr(_("Service \"%s\" not registered\n"),register_servicename);
+ write_stderr(_("Service \"%s\" not registered\n"), register_servicename);
exit(1);
}
if ((hService = OpenService(hSCM, register_servicename, DELETE)) == NULL)
{
CloseServiceHandle(hSCM);
- write_stderr(_("Unable to open service \"%s\" [%d]\n"), register_servicename, (int)GetLastError());
+ write_stderr(_("Unable to open service \"%s\" [%d]\n"), register_servicename, (int) GetLastError());
exit(1);
}
- if (!DeleteService(hService)) {
+ if (!DeleteService(hService))
+ {
CloseServiceHandle(hService);
CloseServiceHandle(hSCM);
- write_stderr(_("Unable to unregister service \"%s\" [%d]\n"), register_servicename, (int)GetLastError());
+ write_stderr(_("Unable to unregister service \"%s\" [%d]\n"), register_servicename, (int) GetLastError());
exit(1);
}
CloseServiceHandle(hService);
static SERVICE_STATUS status;
-static SERVICE_STATUS_HANDLE hStatus = (SERVICE_STATUS_HANDLE)0;
+static SERVICE_STATUS_HANDLE hStatus = (SERVICE_STATUS_HANDLE) 0;
static HANDLE shutdownHandles[2];
static pid_t postmasterPID = -1;
-#define shutdownEvent shutdownHandles[0]
+
+#define shutdownEvent shutdownHandles[0]
#define postmasterProcess shutdownHandles[1]
-static void pgwin32_SetServiceStatus(DWORD currentState)
+static void
+pgwin32_SetServiceStatus(DWORD currentState)
{
status.dwCurrentState = currentState;
- SetServiceStatus(hStatus, (LPSERVICE_STATUS)&status);
+ SetServiceStatus(hStatus, (LPSERVICE_STATUS) & status);
}
-static void WINAPI pgwin32_ServiceHandler(DWORD request)
+static void WINAPI
+pgwin32_ServiceHandler(DWORD request)
{
switch (request)
{
case SERVICE_CONTROL_STOP:
case SERVICE_CONTROL_SHUTDOWN:
- /*
- * We only need a short wait hint here as it just needs to wait for
- * the next checkpoint. They occur every 5 seconds during shutdown
+
+ /*
+ * We only need a short wait hint here as it just needs to
+ * wait for the next checkpoint. They occur every 5 seconds
+ * during shutdown
*/
- status.dwWaitHint = 10000;
+ status.dwWaitHint = 10000;
pgwin32_SetServiceStatus(SERVICE_STOP_PENDING);
SetEvent(shutdownEvent);
return;
case SERVICE_CONTROL_PAUSE:
/* Win32 config reloading */
status.dwWaitHint = 5000;
- kill(postmasterPID,SIGHUP);
+ kill(postmasterPID, SIGHUP);
return;
- /* FIXME: These could be used to replace other signals etc */
+ /* FIXME: These could be used to replace other signals etc */
case SERVICE_CONTROL_CONTINUE:
case SERVICE_CONTROL_INTERROGATE:
default:
}
}
-static void WINAPI pgwin32_ServiceMain(DWORD argc, LPTSTR *argv)
+static void WINAPI
+pgwin32_ServiceMain(DWORD argc, LPTSTR * argv)
{
STARTUPINFO si;
PROCESS_INFORMATION pi;
- DWORD ret;
+ DWORD ret;
/* Initialize variables */
- status.dwWin32ExitCode = S_OK;
- status.dwCheckPoint = 0;
- status.dwWaitHint = 60000;
- status.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
- status.dwControlsAccepted = SERVICE_ACCEPT_STOP|SERVICE_ACCEPT_SHUTDOWN|SERVICE_ACCEPT_PAUSE_CONTINUE;
- status.dwServiceSpecificExitCode = 0;
+ status.dwWin32ExitCode = S_OK;
+ status.dwCheckPoint = 0;
+ status.dwWaitHint = 60000;
+ status.dwServiceType = SERVICE_WIN32_OWN_PROCESS;
+ status.dwControlsAccepted = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN | SERVICE_ACCEPT_PAUSE_CONTINUE;
+ status.dwServiceSpecificExitCode = 0;
status.dwCurrentState = SERVICE_START_PENDING;
- memset(&pi,0,sizeof(pi));
- memset(&si,0,sizeof(si));
+ memset(&pi, 0, sizeof(pi));
+ memset(&si, 0, sizeof(si));
si.cb = sizeof(si);
/* Register the control request handler */
- if ((hStatus = RegisterServiceCtrlHandler(register_servicename, pgwin32_ServiceHandler)) == (SERVICE_STATUS_HANDLE)0)
+ if ((hStatus = RegisterServiceCtrlHandler(register_servicename, pgwin32_ServiceHandler)) == (SERVICE_STATUS_HANDLE) 0)
return;
- if ((shutdownEvent = CreateEvent(NULL,true,false,NULL)) == NULL)
+ if ((shutdownEvent = CreateEvent(NULL, true, false, NULL)) == NULL)
return;
/* Start the postmaster */
pgwin32_SetServiceStatus(SERVICE_START_PENDING);
- if (!CreateProcess(NULL,pgwin32_CommandLine(false),NULL,NULL,TRUE,0,NULL,NULL,&si,&pi))
+ if (!CreateProcess(NULL, pgwin32_CommandLine(false), NULL, NULL, TRUE, 0, NULL, NULL, &si, &pi))
{
pgwin32_SetServiceStatus(SERVICE_STOPPED);
return;
}
- postmasterPID = pi.dwProcessId;
- postmasterProcess = pi.hProcess;
+ postmasterPID = pi.dwProcessId;
+ postmasterProcess = pi.hProcess;
CloseHandle(pi.hThread);
pgwin32_SetServiceStatus(SERVICE_RUNNING);
/* Wait for quit... */
- ret = WaitForMultipleObjects(2,shutdownHandles,FALSE,INFINITE);
+ ret = WaitForMultipleObjects(2, shutdownHandles, FALSE, INFINITE);
pgwin32_SetServiceStatus(SERVICE_STOP_PENDING);
switch (ret)
{
- case WAIT_OBJECT_0: /* shutdown event */
- kill(postmasterPID,SIGINT);
-
- /*
- * Increment the checkpoint and try again
- * Abort after 12 checkpoints as the postmaster has probably hung
+ case WAIT_OBJECT_0: /* shutdown event */
+ kill(postmasterPID, SIGINT);
+
+ /*
+			 * Increment the checkpoint and try again.  Abort after 12
+ * checkpoints as the postmaster has probably hung
*/
- while (WaitForSingleObject(postmasterProcess,5000) == WAIT_TIMEOUT && status.dwCheckPoint < 12)
- {
+ while (WaitForSingleObject(postmasterProcess, 5000) == WAIT_TIMEOUT && status.dwCheckPoint < 12)
status.dwCheckPoint++;
- }
break;
- case (WAIT_OBJECT_0+1): /* postmaster went down */
+ case (WAIT_OBJECT_0 + 1): /* postmaster went down */
break;
default:
pgwin32_SetServiceStatus(SERVICE_STOPPED);
}
-static void pgwin32_doRunAsService()
+static void
+pgwin32_doRunAsService()
{
- SERVICE_TABLE_ENTRY st[] = {{ register_servicename, pgwin32_ServiceMain },
- { NULL, NULL }};
+ SERVICE_TABLE_ENTRY st[] = {{register_servicename, pgwin32_ServiceMain},
+ {NULL, NULL}};
+
StartServiceCtrlDispatcher(st);
}
-
#endif
static void
do_help(void)
{
printf(_("%s is a utility to start, stop, restart, reload configuration files,\n"
- "report the status of a PostgreSQL server, or kill a PostgreSQL process\n\n"), progname);
+ "report the status of a PostgreSQL server, or kill a PostgreSQL process\n\n"), progname);
printf(_("Usage:\n"));
printf(_(" %s start [-w] [-D DATADIR] [-s] [-l FILENAME] [-o \"OPTIONS\"]\n"), progname);
printf(_(" %s stop [-W] [-D DATADIR] [-s] [-m SHUTDOWN-MODE]\n"), progname);
printf(_("If the -D option is omitted, the environment variable PGDATA is used.\n\n"));
printf(_("Options for start or restart:\n"));
printf(_(" -l, --log FILENAME write (or append) server log to FILENAME. The\n"
- " use of this option is highly recommended.\n"));
+ " use of this option is highly recommended.\n"));
printf(_(" -o OPTIONS command line options to pass to the postmaster\n"
- " (PostgreSQL server executable)\n"));
+ " (PostgreSQL server executable)\n"));
printf(_(" -p PATH-TO-POSTMASTER normally not necessary\n\n"));
printf(_("Options for stop or restart:\n"));
printf(_(" -m SHUTDOWN-MODE may be 'smart', 'fast', or 'immediate'\n\n"));
int option_index;
int c;
pgpid_t killproc = 0;
-
+
#ifdef WIN32
setvbuf(stderr, NULL, _IONBF, 0);
#endif
umask(077);
- if (argc > 1)
- {
+ if (argc > 1)
+ {
if (strcmp(argv[1], "-h") == 0 || strcmp(argv[1], "--help") == 0 ||
strcmp(argv[1], "-?") == 0)
{
}
/*
- * 'Action' can be before or after args so loop over both.
- * Some getopt_long() implementations will reorder argv[]
- * to place all flags first (GNU?), but we don't rely on it.
- * Our /port version doesn't do that.
+ * 'Action' can be before or after args so loop over both. Some
+ * getopt_long() implementations will reorder argv[] to place all
+ * flags first (GNU?), but we don't rely on it. Our /port version
+ * doesn't do that.
*/
optind = 1;
-
+
/* process command-line options */
while (optind < argc)
{
switch (c)
{
case 'D':
- {
- int len = strlen(optarg);
- char *env_var;
-
- env_var = xmalloc(len + 8);
- snprintf(env_var, len + 8, "PGDATA=%s", optarg);
- putenv(env_var);
- /* Show -D for easier postmaster 'ps' identification */
- pgdata_opt = xmalloc(len + 7);
- snprintf(pgdata_opt, len + 7, "-D \"%s\" ", optarg);
- break;
- }
+ {
+ int len = strlen(optarg);
+ char *env_var;
+
+ env_var = xmalloc(len + 8);
+ snprintf(env_var, len + 8, "PGDATA=%s", optarg);
+ putenv(env_var);
+
+ /*
+ * Show -D for easier postmaster 'ps'
+ * identification
+ */
+ pgdata_opt = xmalloc(len + 7);
+ snprintf(pgdata_opt, len + 7, "-D \"%s\" ", optarg);
+ break;
+ }
case 'l':
log_file = xstrdup(optarg);
break;
postgres_path = xstrdup(optarg);
break;
case 'P':
- register_password = xstrdup(optarg);
+ register_password = xstrdup(optarg);
break;
case 's':
silence_echo = true;
break;
case 'U':
- if (strchr(optarg,'\\'))
- register_username = xstrdup(optarg);
- else /* Prepend .\ for local accounts */
+ if (strchr(optarg, '\\'))
+ register_username = xstrdup(optarg);
+ else
+/* Prepend .\ for local accounts */
{
- register_username = malloc(strlen(optarg)+3);
+ register_username = malloc(strlen(optarg) + 3);
if (!register_username)
{
write_stderr(_("%s: out of memory\n"), progname);
exit(1);
}
- strcpy(register_username,".\\");
- strcat(register_username,optarg);
+ strcpy(register_username, ".\\");
+ strcat(register_username, optarg);
}
break;
case 'w':
exit(1);
}
}
-
+
/* Process an action */
if (optind < argc)
{
do_advice();
exit(1);
}
-
+
if (strcmp(argv[optind], "start") == 0)
ctl_command = START_COMMAND;
else if (strcmp(argv[optind], "stop") == 0)
do_kill(killproc);
break;
#ifdef WIN32
- case REGISTER_COMMAND:
- pgwin32_doRegister();
- break;
- case UNREGISTER_COMMAND:
- pgwin32_doUnregister();
- break;
- case RUN_AS_SERVICE_COMMAND:
- pgwin32_doRunAsService();
- break;
+ case REGISTER_COMMAND:
+ pgwin32_doRegister();
+ break;
+ case UNREGISTER_COMMAND:
+ pgwin32_doUnregister();
+ break;
+ case RUN_AS_SERVICE_COMMAND:
+ pgwin32_doRunAsService();
+ break;
#endif
default:
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.83 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/common.c,v 1.84 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* These variables are static to avoid the notational cruft of having to pass
* them into findTableByOid() and friends.
*/
-static TableInfo *tblinfo;
-static TypeInfo *typinfo;
-static FuncInfo *funinfo;
-static OprInfo *oprinfo;
-static int numTables;
-static int numTypes;
-static int numFuncs;
-static int numOperators;
+static TableInfo *tblinfo;
+static TypeInfo *typinfo;
+static FuncInfo *funinfo;
+static OprInfo *oprinfo;
+static int numTables;
+static int numTypes;
+static int numFuncs;
+static int numOperators;
static void flagInhTables(TableInfo *tbinfo, int numTables,
InhInfo *inhinfo, int numInherits);
static int DOCatalogIdCompare(const void *p1, const void *p2);
static void findParentsByOid(TableInfo *self,
- InhInfo *inhinfo, int numInherits);
+ InhInfo *inhinfo, int numInherits);
static int strInArray(const char *pattern, char **arr, int arr_size);
{
defaultsFound = true;
defaultsMatch &= (strcmp(attrDef->adef_expr,
- inhDef->adef_expr) == 0);
+ inhDef->adef_expr) == 0);
}
}
}
for (k = 0; k < numParents; k++)
{
- int l;
+ int l;
parent = parents[k];
for (l = 0; l < parent->ncheck; l++)
while (dobj->dumpId >= allocedDumpIds)
{
- int newAlloc;
+ int newAlloc;
if (allocedDumpIds <= 0)
{
*
* We use binary search in a sorted list that is built on first call.
* If AssignDumpId() and findObjectByCatalogId() calls were intermixed,
- * the code would work, but possibly be very slow. In the current usage
+ * the code would work, but possibly be very slow. In the current usage
* pattern that does not happen, indeed we only need to build the list once.
*/
DumpableObject *
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.14 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.c,v 1.15 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
appendStringLiteralDQ(PQExpBuffer buf, const char *str, const char *dqprefix)
{
static const char suffixes[] = "_XXXXXXX";
- int nextchar = 0;
+ int nextchar = 0;
PQExpBuffer delimBuf = createPQExpBuffer();
/* start with $ + dqprefix if not NULL */
appendPQExpBuffer(delimBuf, dqprefix);
/*
- * Make sure we choose a delimiter which (without the trailing $)
- * is not present in the string being quoted. We don't check with the
+ * Make sure we choose a delimiter which (without the trailing $) is
+ * not present in the string being quoted. We don't check with the
* trailing $ because a string ending in $foo must not be quoted with
* $foo$.
*/
while (strstr(str, delimBuf->data) != NULL)
{
appendPQExpBufferChar(delimBuf, suffixes[nextchar++]);
- nextchar %= sizeof(suffixes)-1;
+ nextchar %= sizeof(suffixes) - 1;
}
/* add trailing $ */
bool escapeAll, const char *dqprefix)
{
if (strchr(str, '\'') == NULL && strchr(str, '\\') == NULL)
- appendStringLiteral(buf,str,escapeAll);
+ appendStringLiteral(buf, str, escapeAll);
else
- appendStringLiteralDQ(buf,str,dqprefix);
+ appendStringLiteralDQ(buf, str, dqprefix);
}
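
Editor's note: the loop in appendStringLiteralDQ() above keeps growing the dollar-quote tag until it no longer occurs in the string being quoted. A small standalone approximation of that idea follows; the buffer size, suffix alphabet, and test string are illustrative, the optional prefix is omitted, and PQExpBuffer is replaced by a plain char array:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	static const char suffixes[] = "_XXXXXXX";
	const char *str = "contains $$ and $_X$ already";	/* invented input */
	char		delim[64] = "$";
	int			nextchar = 0;

	/* append suffix characters until the tag is no longer a substring */
	while (strstr(str, delim) != NULL && strlen(delim) < sizeof(delim) - 2)
	{
		size_t		len = strlen(delim);

		delim[len] = suffixes[nextchar++];
		delim[len + 1] = '\0';
		nextchar %= (int) (sizeof(suffixes) - 1);
	}
	printf("quote as: %s$%s%s$\n", delim, str, delim);
	return 0;
}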
while (*input && *input != '=')
{
- /* If user name isn't quoted, then just add it to the output buffer */
+ /*
+ * If user name isn't quoted, then just add it to the output
+ * buffer
+ */
if (*input != '"')
appendPQExpBufferChar(output, *input++);
else
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.12 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/dumputils.h,v 1.13 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern const char *fmtId(const char *identifier);
extern void appendStringLiteral(PQExpBuffer buf, const char *str,
bool escapeAll);
-extern void appendStringLiteralDQ(PQExpBuffer buf, const char *str,
- const char *dqprefix);
-extern void appendStringLiteralDQOpt(PQExpBuffer buf, const char *str,
- bool escapeAll, const char *dqprefix);
+extern void appendStringLiteralDQ(PQExpBuffer buf, const char *str,
+ const char *dqprefix);
+extern void appendStringLiteralDQOpt(PQExpBuffer buf, const char *str,
+ bool escapeAll, const char *dqprefix);
extern int parse_version(const char *versionString);
extern bool parsePGArray(const char *atext, char ***itemarray, int *nitems);
extern bool buildACLCommands(const char *name, const char *type,
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.32 2004/08/20 04:20:22 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.33 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct _restoreOptions
{
int create; /* Issue commands to create the database */
- int noOwner; /* Don't try to match original object owner */
+ int noOwner; /* Don't try to match original object
+ * owner */
int disable_triggers; /* disable triggers during
* data-only restore */
- int use_setsessauth; /* Use SET SESSION AUTHORIZATION commands instead of OWNER TO */
+ int use_setsessauth;/* Use SET SESSION AUTHORIZATION commands
+ * instead of OWNER TO */
char *superuser; /* Username to use as superuser */
int dataOnly;
int dropSchema;
/* This extension allows gcc to check the format string */
__attribute__((format(printf, 2, 3)));
-#endif /* PG_BACKUP_H */
+#endif /* PG_BACKUP_H */
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.94 2004/08/20 20:00:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.95 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static ArchiveHandle *_allocAH(const char *FileSpec, const ArchiveFormat fmt,
const int compression, ArchiveMode mode);
-static char *_getObjectFromDropStmt(const char *dropStmt, const char *type);
+static char *_getObjectFromDropStmt(const char *dropStmt, const char *type);
static void _printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass);
static void fixPriorBlobRefs(ArchiveHandle *AH, TocEntry *blobte,
- RestoreOptions *ropt);
+ RestoreOptions *ropt);
static void _doSetFixedOutputState(ArchiveHandle *AH);
static void _doSetSessionAuth(ArchiveHandle *AH, const char *user);
static void _doSetWithOids(ArchiveHandle *AH, const bool withOids);
ConnectDatabase(AHX, ropt->dbname,
ropt->pghost, ropt->pgport, ropt->username,
ropt->requirePassword, ropt->ignoreVersion);
- /* If we're talking to the DB directly, don't send comments since they obscure SQL when displaying errors */
+
+ /*
+ * If we're talking to the DB directly, don't send comments since
+ * they obscure SQL when displaying errors
+ */
AH->noTocComments = 1;
}
/*
* If we can output the data, then restore it.
*/
- if (AH->PrintTocDataPtr != NULL && (reqs & REQ_DATA) != 0)
+ if (AH->PrintTocDataPtr !=NULL && (reqs & REQ_DATA) != 0)
{
#ifndef HAVE_LIBZ
if (AH->compression != 0)
* If we just restored blobs, fix references in
* previously-loaded tables; otherwise, if we
* previously restored blobs, fix references in
- * this table. Note that in standard cases the BLOBS
- * entry comes after all TABLE DATA entries, but
- * we should cope with other orders in case the
- * user demands reordering.
+ * this table. Note that in standard cases the
+ * BLOBS entry comes after all TABLE DATA entries,
+ * but we should cope with other orders in case
+ * the user demands reordering.
*/
if (strcmp(te->desc, "BLOBS") == 0)
fixPriorBlobRefs(AH, te, ropt);
}
}
te = te->next;
- } /* end loop over TOC entries */
+ } /* end loop over TOC entries */
/*
* Scan TOC again to output ownership commands and ACLs
/*
* After restoring BLOBS, fix all blob references in previously-restored
- * tables. (Normally, the BLOBS entry should appear after all TABLE DATA
+ * tables. (Normally, the BLOBS entry should appear after all TABLE DATA
* entries, so this will in fact handle all blob references.)
*/
static void
/*
* Become superuser if possible, since they are the only ones who can
- * update pg_class. If -S was not given, assume the initial user identity
- * is a superuser.
+ * update pg_class. If -S was not given, assume the initial user
+ * identity is a superuser.
*/
_becomeUser(AH, ropt->superuser);
/*
* Become superuser if possible, since they are the only ones who can
- * update pg_class. If -S was not given, assume the initial user identity
- * is a superuser.
+ * update pg_class. If -S was not given, assume the initial user
+ * identity is a superuser.
*/
_becomeUser(AH, ropt->superuser);
newToc->formatData = NULL;
- if (AH->ArchiveEntryPtr != NULL)
+ if (AH->ArchiveEntryPtr !=NULL)
(*AH->ArchiveEntryPtr) (AH, newToc);
}
char *fmtName;
if (ropt->filename)
- sav = SetOutput(AH, ropt->filename, 0 /* no compression */);
+ sav = SetOutput(AH, ropt->filename, 0 /* no compression */ );
ahprintf(AH, ";\n; Archive created at %s", ctime(&AH->createDate));
ahprintf(AH, "; dbname: %s\n; TOC Entries: %d\n; Compression: %d\n",
/*
* This is paranoid: deal with the possibility that vsnprintf is
- * willing to ignore trailing null
- * or returns > 0 even if string does not fit. It may be the case that
- * it returns cnt = bufsize
+ * willing to ignore trailing null or returns > 0 even if string does
+ * not fit. It may be the case that it returns cnt = bufsize
*/
while (cnt < 0 || cnt >= (bSize - 1))
{
/* on some error, we may decide to go on... */
void
-warn_or_die_horribly(ArchiveHandle *AH,
- const char *modulename, const char *fmt, ...)
+warn_or_die_horribly(ArchiveHandle *AH,
+ const char *modulename, const char *fmt,...)
{
- va_list ap;
+ va_list ap;
- switch(AH->stage) {
+ switch (AH->stage)
+ {
case STAGE_NONE:
/* Do nothing special */
break;
case STAGE_INITIALIZING:
- if (AH->stage != AH->lastErrorStage) {
+ if (AH->stage != AH->lastErrorStage)
write_msg(modulename, "Error while INITIALIZING:\n");
- }
break;
case STAGE_PROCESSING:
- if (AH->stage != AH->lastErrorStage) {
+ if (AH->stage != AH->lastErrorStage)
write_msg(modulename, "Error while PROCESSING TOC:\n");
- }
break;
case STAGE_FINALIZING:
- if (AH->stage != AH->lastErrorStage) {
+ if (AH->stage != AH->lastErrorStage)
write_msg(modulename, "Error while FINALIZING:\n");
- }
break;
}
- if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE) {
+ if (AH->currentTE != NULL && AH->currentTE != AH->lastErrorTE)
+ {
write_msg(modulename, "Error from TOC Entry %d; %u %u %s %s %s\n", AH->currentTE->dumpId,
- AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
- AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
+ AH->currentTE->catalogId.tableoid, AH->currentTE->catalogId.oid,
+ AH->currentTE->desc, AH->currentTE->tag, AH->currentTE->owner);
}
AH->lastErrorStage = AH->stage;
AH->lastErrorTE = AH->currentTE;
va_start(ap, fmt);
if (AH->public.exit_on_error)
- {
_die_horribly(AH, modulename, fmt, ap);
- }
else
{
_write_msg(modulename, fmt, ap);
pos->prev->next = te;
pos->prev = te;
}
-
#endif
static TocEntry *
* later if necessary */
AH->currSchema = strdup(""); /* ditto */
AH->currWithOids = -1; /* force SET */
-
+
AH->toc = (TocEntry *) calloc(1, sizeof(TocEntry));
if (!AH->toc)
die_horribly(AH, modulename, "out of memory\n");
/* Sanity check */
if (te->dumpId <= 0)
die_horribly(AH, modulename,
- "entry ID %d out of range -- perhaps a corrupt TOC\n",
+ "entry ID %d out of range -- perhaps a corrupt TOC\n",
te->dumpId);
te->hadDumper = ReadInt(AH);
}
else
te->withOids = true;
-
+
/* Read TOC entry dependencies */
if (AH->version >= K_VERS_1_5)
{
PQExpBuffer cmd = createPQExpBuffer();
appendPQExpBuffer(cmd, "SET default_with_oids = %s;", withOids ?
- "true" : "false");
+ "true" : "false");
if (RestoringToDB(AH))
{
res = PQexec(AH->connection, cmd->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
- warn_or_die_horribly(AH, modulename,
+ warn_or_die_horribly(AH, modulename,
"could not set default_with_oids: %s",
PQerrorMessage(AH->connection));
free(AH->currSchema);
AH->currSchema = strdup("");
AH->currWithOids = -1;
-
+
/* re-establish fixed state */
_doSetFixedOutputState(AH);
}
}
/*
- * Become the owner of the the given TOC entry object. If
+ * Become the owner of the given TOC entry object.  If
* changes in ownership are not allowed, this doesn't do anything.
*/
static void
res = PQexec(AH->connection, qry->data);
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
- warn_or_die_horribly(AH, modulename,
- "could not set search_path to \"%s\": %s",
- schemaName, PQerrorMessage(AH->connection));
+ warn_or_die_horribly(AH, modulename,
+ "could not set search_path to \"%s\": %s",
+ schemaName, PQerrorMessage(AH->connection));
PQclear(res);
}
_getObjectFromDropStmt(const char *dropStmt, const char *type)
{
/* Chop "DROP" off the front and make a copy */
- char *first = strdup(dropStmt + 5);
- char *last = first + strlen(first) - 1; /* Points to the last real char in extract */
- char *buf = NULL;
+ char *first = strdup(dropStmt + 5);
+ char *last = first + strlen(first) - 1; /* Points to the last
+ * real char in extract */
+ char *buf = NULL;
- /* Loop from the end of the string until last char is no longer '\n' or ';' */
- while (last >= first && (*last == '\n' || *last == ';')) {
+ /*
+ * Loop from the end of the string until last char is no longer '\n'
+ * or ';'
+ */
+ while (last >= first && (*last == '\n' || *last == ';'))
last--;
- }
/* Insert end of string one place after last */
*(last + 1) = '\0';
- /* Take off CASCADE if necessary. Only TYPEs seem to have this, but may
- * as well check for all */
- if ((last - first) >= 8) {
+ /*
+ * Take off CASCADE if necessary. Only TYPEs seem to have this, but
+ * may as well check for all
+ */
+ if ((last - first) >= 8)
+ {
if (strcmp(last - 7, " CASCADE") == 0)
last -= 8;
}
/* Special case VIEWs and SEQUENCEs. They must use ALTER TABLE. */
if (strcmp(type, "VIEW") == 0 && (last - first) >= 5)
{
- int len = 6 + strlen(first + 5) + 1;
+ int len = 6 + strlen(first + 5) + 1;
+
buf = malloc(len);
snprintf(buf, len, "TABLE %s", first + 5);
- free (first);
+ free(first);
}
else if (strcmp(type, "SEQUENCE") == 0 && (last - first) >= 9)
{
- int len = 6 + strlen(first + 9) + 1;
+ int len = 6 + strlen(first + 9) + 1;
+
buf = malloc(len);
snprintf(buf, len, "TABLE %s", first + 9);
- free (first);
+ free(first);
}
else
- {
buf = first;
- }
return buf;
}
static void
_printTocEntry(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt, bool isData, bool acl_pass)
{
- const char *pfx;
+ const char *pfx;
/* ACLs are dumped only during acl pass */
if (acl_pass)
return;
}
- if (AH->noTocComments)
+ if (AH->noTocComments)
return;
/*
te->dumpId, te->catalogId.tableoid, te->catalogId.oid);
if (te->nDeps > 0)
{
- int i;
+ int i;
ahprintf(AH, "-- Dependencies:");
for (i = 0; i < te->nDeps; i++)
pfx, te->tag, te->desc,
te->namespace ? te->namespace : "-",
te->owner);
- if (AH->PrintExtraTocPtr != NULL)
+ if (AH->PrintExtraTocPtr !=NULL)
(*AH->PrintExtraTocPtr) (AH, te);
ahprintf(AH, "--\n\n");
/*
* Actually print the definition.
*
- * Really crude hack for suppressing AUTHORIZATION clause of CREATE SCHEMA
- * when --no-owner mode is selected. This is ugly, but I see no other
- * good way ...
+ * Really crude hack for suppressing AUTHORIZATION clause of CREATE
+ * SCHEMA when --no-owner mode is selected. This is ugly, but I see
+ * no other good way ...
*/
if (AH->ropt && AH->ropt->noOwner && strcmp(te->desc, "SCHEMA") == 0)
{
/*
* If we aren't using SET SESSION AUTH to determine ownership, we must
- * instead issue an ALTER OWNER command. Ugly, since we have to
- * cons one up based on the dropStmt. We don't need this for schemas
- * (since we use CREATE SCHEMA AUTHORIZATION instead), nor for some other
- * object types.
+ * instead issue an ALTER OWNER command. Ugly, since we have to cons
+ * one up based on the dropStmt. We don't need this for schemas
+ * (since we use CREATE SCHEMA AUTHORIZATION instead), nor for some
+ * other object types.
*/
if (!ropt->noOwner && !ropt->use_setsessauth &&
strlen(te->owner) > 0 && strlen(te->dropStmt) > 0 &&
strcmp(te->desc, "VIEW") == 0 ||
strcmp(te->desc, "SEQUENCE") == 0))
{
- char *temp = _getObjectFromDropStmt(te->dropStmt, te->desc);
+ char *temp = _getObjectFromDropStmt(te->dropStmt, te->desc);
ahprintf(AH, "ALTER %s OWNER TO %s;\n\n", temp, fmtId(te->owner));
free(temp);
/*
* If it's an ACL entry, it might contain SET SESSION AUTHORIZATION
- * commands, so we can no longer assume we know the current auth setting.
+ * commands, so we can no longer assume we know the current auth
+ * setting.
*/
if (strncmp(te->desc, "ACL", 3) == 0)
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.60 2004/08/20 20:00:34 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.61 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define K_VERS_1_6 (( (1 * 256 + 6) * 256 + 0) * 256 + 0) /* Schema field in TOCs */
#define K_VERS_1_7 (( (1 * 256 + 7) * 256 + 0) * 256 + 0) /* File Offset size in
* header */
-#define K_VERS_1_8 (( (1 * 256 + 8) * 256 + 0) * 256 + 0) /* change interpretation of ID numbers and dependencies */
-#define K_VERS_1_9 (( (1 * 256 + 9) * 256 + 0) * 256 + 0) /* add default_with_oids tracking */
+#define K_VERS_1_8 (( (1 * 256 + 8) * 256 + 0) * 256 + 0) /* change interpretation
+ * of ID numbers and
+ * dependencies */
+#define K_VERS_1_9 (( (1 * 256 + 9) * 256 + 0) * 256 + 0) /* add default_with_oids
+ * tracking */
#define K_VERS_MAX (( (1 * 256 + 9) * 256 + 255) * 256 + 0)
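
Editor's note: the K_VERS_* macros above pack an archive version number as one byte per component. The following throwaway program (the MAKE_VERS helper is the editor's, not pg_dump's) shows how such a packed value decomposes:

#include <stdio.h>

#define MAKE_VERS(maj, min, rev)  ((((maj) * 256 + (min)) * 256 + (rev)) * 256 + 0)

int
main(void)
{
	unsigned int v = MAKE_VERS(1, 9, 0);

	printf("packed: %u\n", v);
	printf("major:  %u\n", (v >> 24) & 0xFF);	/* 1 */
	printf("minor:  %u\n", (v >> 16) & 0xFF);	/* 9 */
	printf("rev:    %u\n", (v >> 8) & 0xFF);	/* 0 */
	return 0;
}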
char lastChar;
char quoteChar;
int braceDepth;
- PQExpBuffer tagBuf;
+ PQExpBuffer tagBuf;
} sqlparseInfo;
-typedef enum
+typedef enum
{
- STAGE_NONE = 0,
- STAGE_INITIALIZING,
- STAGE_PROCESSING,
- STAGE_FINALIZING
+ STAGE_NONE = 0,
+ STAGE_INITIALIZING,
+ STAGE_PROCESSING,
+ STAGE_FINALIZING
} ArchiverStage;
typedef struct _archiveHandle
char *currUser; /* current username */
char *currSchema; /* current schema */
bool currWithOids; /* current default_with_oids setting */
-
+
void *lo_buf;
size_t lo_buf_used;
size_t lo_buf_size;
- int noTocComments;
- ArchiverStage stage;
- ArchiverStage lastErrorStage;
- struct _tocEntry *currentTE;
- struct _tocEntry *lastErrorTE;
+ int noTocComments;
+ ArchiverStage stage;
+ ArchiverStage lastErrorStage;
+ struct _tocEntry *currentTE;
+ struct _tocEntry *lastErrorTE;
} ArchiveHandle;
typedef struct _tocEntry
* Implements the basic DB functions used by the archiver.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.56 2004/08/28 22:52:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.57 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static char *_sendSQLLine(ArchiveHandle *AH, char *qry, char *eos);
static char *_sendCopyLine(ArchiveHandle *AH, char *qry, char *eos);
-static int _isIdentChar(unsigned char c);
-static int _isDQChar(unsigned char c, int atStart);
+static int _isIdentChar(unsigned char c);
+static int _isDQChar(unsigned char c, int atStart);
#define DB_MAX_ERR_STMT 128
_executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc)
{
PGresult *res;
- char errStmt[DB_MAX_ERR_STMT];
+ char errStmt[DB_MAX_ERR_STMT];
/* fprintf(stderr, "Executing: '%s'\n\n", qry->data); */
res = PQexec(conn, qry->data);
else
{
strncpy(errStmt, qry->data, DB_MAX_ERR_STMT);
- if (errStmt[DB_MAX_ERR_STMT-1] != '\0') {
- errStmt[DB_MAX_ERR_STMT-4] = '.';
- errStmt[DB_MAX_ERR_STMT-3] = '.';
- errStmt[DB_MAX_ERR_STMT-2] = '.';
- errStmt[DB_MAX_ERR_STMT-1] = '\0';
+ if (errStmt[DB_MAX_ERR_STMT - 1] != '\0')
+ {
+ errStmt[DB_MAX_ERR_STMT - 4] = '.';
+ errStmt[DB_MAX_ERR_STMT - 3] = '.';
+ errStmt[DB_MAX_ERR_STMT - 2] = '.';
+ errStmt[DB_MAX_ERR_STMT - 1] = '\0';
}
warn_or_die_horribly(AH, modulename, "%s: %s Command was: %s\n",
- desc, PQerrorMessage(AH->connection),
- errStmt);
+ desc, PQerrorMessage(AH->connection),
+ errStmt);
}
}
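
Editor's note: the error path above truncates the offending statement and marks the cut with "..." so the message stays bounded. Here is the same idiom as a tiny standalone program, with an invented buffer size and query text:

#include <stdio.h>
#include <string.h>

#define MAX_ERR_STMT 32					/* illustrative size only */

int
main(void)
{
	const char *qry = "SELECT something_rather_long FROM a_table WHERE true";
	char		errStmt[MAX_ERR_STMT];

	strncpy(errStmt, qry, MAX_ERR_STMT);
	if (errStmt[MAX_ERR_STMT - 1] != '\0')	/* copy filled the buffer */
	{
		errStmt[MAX_ERR_STMT - 4] = '.';
		errStmt[MAX_ERR_STMT - 3] = '.';
		errStmt[MAX_ERR_STMT - 2] = '.';
		errStmt[MAX_ERR_STMT - 1] = '\0';
	}
	printf("%s\n", errStmt);
	return 0;
}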
_sendSQLLine(ArchiveHandle *AH, char *qry, char *eos)
{
int pos = 0; /* Current position */
- char *sqlPtr;
- int consumed;
+ char *sqlPtr;
+ int consumed;
int startDT = 0;
/*
/* Loop until character consumed */
do
{
- /* If a character needs to be scanned in a different state,
- * consumed can be set to 0 to avoid advancing. Care must
- * be taken to ensure internal state is not damaged.
+ /*
+ * If a character needs to be scanned in a different state,
+ * consumed can be set to 0 to avoid advancing. Care must be
+ * taken to ensure internal state is not damaged.
*/
consumed = 1;
switch (AH->sqlparse.state)
- {
-
- case SQL_SCAN: /* Default state == 0, set in _allocAH */
+ {
+
+ case SQL_SCAN: /* Default state == 0, set in _allocAH */
if (qry[pos] == ';' && AH->sqlparse.braceDepth == 0)
{
- /* We've got the end of a statement.
- * Send It & reset the buffer.
+ /*
+						 * We've got the end of a statement. Send it &
+ * reset the buffer.
*/
-
+
/*
* fprintf(stderr, " sending: '%s'\n\n",
* AH->sqlBuf->data);
ExecuteSqlCommand(AH, AH->sqlBuf, "could not execute query", false);
resetPQExpBuffer(AH->sqlBuf);
AH->sqlparse.lastChar = '\0';
-
+
/*
- * Remove any following newlines - so that embedded
- * COPY commands don't get a starting newline.
+ * Remove any following newlines - so that
+ * embedded COPY commands don't get a starting
+ * newline.
*/
pos++;
for (; pos < (eos - qry) && qry[pos] == '\n'; pos++);
-
+
/* We've got our line, so exit */
return qry + pos;
}
else
{
- /*
- * Look for normal boring quote chars, or dollar-quotes. We make
- * the assumption that $-quotes will not have an ident character
+ /*
+ * Look for normal boring quote chars, or
+ * dollar-quotes. We make the assumption that
+ * $-quotes will not have an ident character
* before them in all pg_dump output.
*/
- if ( qry[pos] == '"'
- || qry[pos] == '\''
- || ( qry[pos] == '$' && _isIdentChar(AH->sqlparse.lastChar) == 0 )
- )
+ if (qry[pos] == '"'
+ || qry[pos] == '\''
+ || (qry[pos] == '$' && _isIdentChar(AH->sqlparse.lastChar) == 0)
+ )
{
/* fprintf(stderr,"[startquote]\n"); */
AH->sqlparse.state = SQL_IN_QUOTE;
AH->sqlparse.braceDepth++;
else if (qry[pos] == ')')
AH->sqlparse.braceDepth--;
-
+
AH->sqlparse.lastChar = qry[pos];
}
break;
-
+
case SQL_IN_DOLLARTAG:
-
- /* Like a quote, we look for a closing char *but* we only
- * allow a very limited set of contained chars, and no escape chars.
- * If invalid chars are found, we abort tag processing.
+
+ /*
+ * Like a quote, we look for a closing char *but* we
+ * only allow a very limited set of contained chars,
+ * and no escape chars. If invalid chars are found, we
+ * abort tag processing.
*/
-
+
if (qry[pos] == '$')
{
/* fprintf(stderr,"[endquote]\n"); */
}
else
{
- if ( _isDQChar(qry[pos], startDT) )
+ if (_isDQChar(qry[pos], startDT))
{
/* Valid, so add */
appendPQExpBufferChar(AH->sqlparse.tagBuf, qry[pos]);
}
else
{
- /* Jump back to 'scan' state, we're not really in a tag,
- * and valid tag chars do not include the various chars
- * we look for in this state machine, so it's safe to just
- * jump from this state back to SCAN. We set consumed = 0
- * so that this char gets rescanned in new state.
+ /*
+ * Jump back to 'scan' state, we're not really
+ * in a tag, and valid tag chars do not
+ * include the various chars we look for in
+ * this state machine, so it's safe to just
+ * jump from this state back to SCAN. We set
+ * consumed = 0 so that this char gets
+ * rescanned in new state.
*/
destroyPQExpBuffer(AH->sqlparse.tagBuf);
AH->sqlparse.state = SQL_SCAN;
}
startDT = 0;
break;
-
+
case SQL_IN_DOLLARQUOTE:
+
/*
- * Comparing the entire string backwards each time is NOT efficient,
- * but dollar quotes in pg_dump are small and the code is a lot simpler.
+ * Comparing the entire string backwards each time is
+ * NOT efficient, but dollar quotes in pg_dump are
+ * small and the code is a lot simpler.
*/
sqlPtr = AH->sqlBuf->data + AH->sqlBuf->len - AH->sqlparse.tagBuf->len;
-
- if (strncmp(AH->sqlparse.tagBuf->data, sqlPtr, AH->sqlparse.tagBuf->len) == 0) {
+
+ if (strncmp(AH->sqlparse.tagBuf->data, sqlPtr, AH->sqlparse.tagBuf->len) == 0)
+ {
/* End of $-quote */
AH->sqlparse.state = SQL_SCAN;
destroyPQExpBuffer(AH->sqlparse.tagBuf);
}
break;
-
+
case SQL_IN_SQL_COMMENT:
if (qry[pos] == '\n')
AH->sqlparse.state = SQL_SCAN;
break;
-
+
case SQL_IN_EXT_COMMENT:
if (AH->sqlparse.lastChar == '*' && qry[pos] == '/')
AH->sqlparse.state = SQL_SCAN;
break;
-
+
case SQL_IN_QUOTE:
if (!AH->sqlparse.backSlash && AH->sqlparse.quoteChar == qry[pos])
}
else
{
-
+
if (qry[pos] == '\\')
{
if (AH->sqlparse.lastChar == '\\')
AH->sqlparse.backSlash = 0;
}
break;
-
+
}
} while (consumed == 0);
- AH->sqlparse.lastChar = qry[pos];
- /* fprintf(stderr, "\n"); */
+ AH->sqlparse.lastChar = qry[pos];
+ /* fprintf(stderr, "\n"); */
}
/*
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry,
- "INSERT INTO %s(oldOid, newOid) VALUES ('%u', '%u')",
+ "INSERT INTO %s(oldOid, newOid) VALUES ('%u', '%u')",
BLOB_XREF_TABLE, old, new);
ExecuteSqlCommand(AH, qry, "could not create large object cross-reference entry", true);
destroyPQExpBuffer(qry);
}
-static int _isIdentChar(unsigned char c)
+static int
+_isIdentChar(unsigned char c)
{
- if ( (c >= 'a' && c <= 'z')
- || (c >= 'A' && c <= 'Z')
- || (c >= '0' && c <= '9')
- || (c == '_')
- || (c == '$')
- || (c >= (unsigned char)'\200') /* no need to check <= \377 */
- )
- {
+ if ((c >= 'a' && c <= 'z')
+ || (c >= 'A' && c <= 'Z')
+ || (c >= '0' && c <= '9')
+ || (c == '_')
+ || (c == '$')
+ || (c >= (unsigned char) '\200') /* no need to check <=
+ * \377 */
+ )
return 1;
- }
else
- {
return 0;
- }
}
-static int _isDQChar(unsigned char c, int atStart)
+static int
+_isDQChar(unsigned char c, int atStart)
{
- if ( (c >= 'a' && c <= 'z')
- || (c >= 'A' && c <= 'Z')
- || (c == '_')
- || (atStart == 0 && c >= '0' && c <= '9')
- || (c >= (unsigned char)'\200') /* no need to check <= \377 */
- )
- {
+ if ((c >= 'a' && c <= 'z')
+ || (c >= 'A' && c <= 'Z')
+ || (c == '_')
+ || (atStart == 0 && c >= '0' && c <= '9')
+ || (c >= (unsigned char) '\200') /* no need to check <=
+ * \377 */
+ )
return 1;
- }
else
- {
return 0;
- }
}
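
Editor's note: the two predicates above define which characters may appear in an identifier and in a dollar-quote tag. As a rough standalone illustration of the dollar-quote rule (letters, '_' and high-bit bytes anywhere, digits only after the first position; the test tags are invented), consider:

#include <stdio.h>

static int
dq_char_ok(unsigned char c, int at_start)
{
	if ((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_')
		return 1;
	if (!at_start && c >= '0' && c <= '9')
		return 1;
	return c >= 0x80;			/* allow high-bit (multibyte) characters */
}

static int
tag_ok(const char *tag)
{
	int			i;

	for (i = 0; tag[i] != '\0'; i++)
		if (!dq_char_ok((unsigned char) tag[i], i == 0))
			return 0;
	return 1;
}

int
main(void)
{
	const char *tags[] = {"body", "_tag1", "1tag", "fn$x"};
	int			i;

	for (i = 0; i < 4; i++)
		printf("%-6s -> %s\n", tags[i], tag_ok(tags[i]) ? "valid" : "invalid");
	return 0;
}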
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.43 2004/05/07 00:24:58 tgl Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.44 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AH->formatData = (void *) ctx;
ctx->filePos = 0;
ctx->isSpecialScript = 0;
-
+
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;
AH->lo_buf = (void *) malloc(LOBBUFSIZE);
* by PostgreSQL
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.385 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.386 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
char g_comment_start[10];
char g_comment_end[10];
-static const CatalogId nilCatalogId = { 0, 0 };
+static const CatalogId nilCatalogId = {0, 0};
/* these are to avoid passing around info for findNamespace() */
static NamespaceInfo *g_namespaces;
static int g_numNamespaces;
/* flag to turn on/off dollar quoting */
-static int disable_dollar_quoting = 0;
+static int disable_dollar_quoting = 0;
static void help(const char *progname);
static void dumpComment(Archive *fout, const char *target,
const char *namespace, const char *owner,
CatalogId catalogId, int subid, DumpId dumpId);
-static int findComments(Archive *fout, Oid classoid, Oid objoid,
- CommentItem **items);
+static int findComments(Archive *fout, Oid classoid, Oid objoid,
+ CommentItem **items);
static int collectComments(Archive *fout, CommentItem **items);
static void dumpDumpableObject(Archive *fout, DumpableObject *dobj);
static void dumpNamespace(Archive *fout, NamespaceInfo *nspinfo);
static void getDomainConstraints(TypeInfo *tinfo);
static void getTableData(TableInfo *tblinfo, int numTables, bool oids);
static char *format_function_signature(FuncInfo *finfo, char **argnames,
- bool honor_quotes);
+ bool honor_quotes);
static const char *convertRegProcReference(const char *proc);
static const char *convertOperatorReference(const char *opr);
static Oid findLastBuiltinOid_V71(const char *);
static const char *fmtCopyColumnList(const TableInfo *ti);
static void do_sql_command(PGconn *conn, const char *query);
static void check_sql_result(PGresult *res, PGconn *conn, const char *query,
- ExecStatusType expected);
+ ExecStatusType expected);
int
*
* In 7.3 or later, we can rely on dependency information to help us
* determine a safe order, so the initial sort is mostly for cosmetic
- * purposes: we sort by name to ensure that logically identical schemas
- * will dump identically. Before 7.3 we don't have dependencies and
- * we use OID ordering as an (unreliable) guide to creation order.
+ * purposes: we sort by name to ensure that logically identical
+ * schemas will dump identically. Before 7.3 we don't have
+ * dependencies and we use OID ordering as an (unreliable) guide to
+ * creation order.
*/
getDumpableObjects(&dobjs, &numObjs);
sortDumpableObjects(dobjs, numObjs);
/*
- * Create archive TOC entries for all the objects to be dumped,
- * in a safe order.
+ * Create archive TOC entries for all the objects to be dumped, in a
+ * safe order.
*/
if (g_fout->verbose)
/* Now the rearrangeable objects. */
for (i = 0; i < numObjs; i++)
- {
dumpDumpableObject(g_fout, dobjs[i]);
- }
if (g_fout->verbose)
dumpTimestamp(g_fout, "Completed on");
if (oids && hasoids)
{
appendPQExpBuffer(q, "COPY %s %s WITH OIDS TO stdout;",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname),
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname),
column_list);
}
else
{
appendPQExpBuffer(q, "COPY %s %s TO stdout;",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname),
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname),
column_list);
}
res = PQexec(g_conn, q->data);
{
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM ONLY %s",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname));
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname));
}
else
{
appendPQExpBuffer(q, "DECLARE _pg_dump_cursor CURSOR FOR "
"SELECT * FROM %s",
- fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
- classname));
+ fmtQualifiedId(tbinfo->dobj.namespace->dobj.name,
+ classname));
}
res = PQexec(g_conn, q->data);
fmtId(tbinfo->dobj.name));
appendPQExpBuffer(copyBuf, "%s %sFROM stdin;\n",
fmtCopyColumnList(tbinfo),
- (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
+ (tdinfo->oids && tbinfo->hasoids) ? "WITH OIDS " : "");
copyStmt = copyBuf->data;
}
else
tdinfo = (TableDataInfo *) malloc(sizeof(TableDataInfo));
tdinfo->dobj.objType = DO_TABLE_DATA;
+
/*
* Note: use tableoid 0 so that this object won't be mistaken
* for something that pg_depend entries apply to.
if (g_fout->remoteVersion >= 80000)
{
appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
- "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
+ "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
"pg_encoding_to_char(encoding) as encoding, "
"(SELECT spcname FROM pg_tablespace t WHERE t.oid = dattablespace) as tablespace "
"FROM pg_database "
else if (g_fout->remoteVersion >= 70100)
{
appendPQExpBuffer(dbQry, "SELECT tableoid, oid, "
- "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
+ "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
"pg_encoding_to_char(encoding) as encoding, "
"NULL as tablespace "
"FROM pg_database "
appendPQExpBuffer(dbQry, "SELECT "
"(SELECT oid FROM pg_class WHERE relname = 'pg_database') AS tableoid, "
"oid, "
- "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
+ "(SELECT usename FROM pg_user WHERE usesysid = datdba) as dba, "
"pg_encoding_to_char(encoding) as encoding, "
"NULL as tablespace "
"FROM pg_database "
appendStringLiteral(creaQry, encoding, true);
}
if (strlen(tablespace) > 0 && strcmp(tablespace, "pg_default") != 0)
- {
appendPQExpBuffer(creaQry, " TABLESPACE = %s", fmtId(tablespace));
- }
appendPQExpBuffer(creaQry, ";\n");
appendPQExpBuffer(delQry, "DROP DATABASE %s;\n",
static void
dumpTimestamp(Archive *AH, char *msg)
{
- char buf[256];
- time_t now = time(NULL);
+ char buf[256];
+ time_t now = time(NULL);
if (strftime(buf, 256, "%Y-%m-%d %H:%M:%S %Z", localtime(&now)) != 0)
- {
+ {
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry, "-- ");
if (g_fout->remoteVersion >= 80000)
{
appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
- "(select usename from pg_user where nspowner = usesysid) as usename, "
+ "(select usename from pg_user where nspowner = usesysid) as usename, "
"nspacl, "
- "(SELECT spcname FROM pg_tablespace t WHERE t.oid = nsptablespace) AS nsptablespace "
+ "(SELECT spcname FROM pg_tablespace t WHERE t.oid = nsptablespace) AS nsptablespace "
"FROM pg_namespace");
}
else
{
appendPQExpBuffer(query, "SELECT tableoid, oid, nspname, "
- "(select usename from pg_user where nspowner = usesysid) as usename, "
+ "(select usename from pg_user where nspowner = usesysid) as usename, "
"nspacl, NULL AS nsptablespace "
"FROM pg_namespace");
}
"typnamespace, "
"(select usename from pg_user where typowner = usesysid) as usename, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
"0::oid as typnamespace, "
"(select usename from pg_user where typowner = usesysid) as usename, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
"0::oid as typnamespace, "
"(select usename from pg_user where typowner = usesysid) as usename, "
"typinput::oid as typinput, "
- "typoutput::oid as typoutput, typelem, typrelid, "
+ "typoutput::oid as typoutput, typelem, typrelid, "
"CASE WHEN typrelid = 0 THEN ' '::\"char\" "
"ELSE (SELECT relkind FROM pg_class WHERE oid = typrelid) END as typrelkind, "
"typtype, typisdefined "
AssignDumpId(&tinfo[i].dobj);
tinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_typname));
tinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_typnamespace)),
- tinfo[i].dobj.catId.oid);
+ tinfo[i].dobj.catId.oid);
tinfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
tinfo[i].typinput = atooid(PQgetvalue(res, i, i_typinput));
typoutput = atooid(PQgetvalue(res, i, i_typoutput));
/*
* If it's a table's rowtype, use special type code to facilitate
- * sorting into the desired order. (We don't want to consider it
+ * sorting into the desired order. (We don't want to consider it
* an ordinary type because that would bring the table up into the
* datatype part of the dump order.)
*/
/*
* Make sure there are dependencies from the type to its input and
- * output functions. (We don't worry about typsend, typreceive, or
- * typanalyze since those are only valid in 7.4 and later, wherein
- * the standard dependency mechanism will pick them up.)
+ * output functions. (We don't worry about typsend, typreceive,
+ * or typanalyze since those are only valid in 7.4 and later,
+ * wherein the standard dependency mechanism will pick them up.)
*/
funcInfo = findFuncByOid(tinfo[i].typinput);
if (funcInfo)
AssignDumpId(&oprinfo[i].dobj);
oprinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_oprname));
oprinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_oprnamespace)),
- oprinfo[i].dobj.catId.oid);
+ oprinfo[i].dobj.catId.oid);
oprinfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
oprinfo[i].oprcode = atooid(PQgetvalue(res, i, i_oprcode));
int ntups;
int i;
PQExpBuffer query = createPQExpBuffer();
- ConvInfo *convinfo;
+ ConvInfo *convinfo;
int i_tableoid;
int i_oid;
int i_conname;
int i_usename;
/* Conversions didn't exist pre-7.3 */
- if (g_fout->remoteVersion < 70300) {
+ if (g_fout->remoteVersion < 70300)
+ {
*numConversions = 0;
return NULL;
}
appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
"connamespace, "
- "(select usename from pg_user where conowner = usesysid) as usename "
+ "(select usename from pg_user where conowner = usesysid) as usename "
"FROM pg_conversion");
res = PQexec(g_conn, query->data);
AssignDumpId(&convinfo[i].dobj);
convinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_conname));
convinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_connamespace)),
- convinfo[i].dobj.catId.oid);
+ convinfo[i].dobj.catId.oid);
convinfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
}
AssignDumpId(&opcinfo[i].dobj);
opcinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_opcname));
opcinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_opcnamespace)),
- opcinfo[i].dobj.catId.oid);
+ opcinfo[i].dobj.catId.oid);
opcinfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
if (g_fout->remoteVersion >= 70300)
AssignDumpId(&agginfo[i].aggfn.dobj);
agginfo[i].aggfn.dobj.name = strdup(PQgetvalue(res, i, i_aggname));
agginfo[i].aggfn.dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_aggnamespace)),
- agginfo[i].aggfn.dobj.catId.oid);
+ agginfo[i].aggfn.dobj.catId.oid);
agginfo[i].aggfn.usename = strdup(PQgetvalue(res, i, i_usename));
if (strlen(agginfo[i].aggfn.usename) == 0)
write_msg(NULL, "WARNING: owner of aggregate function \"%s\" appears to be invalid\n",
agginfo[i].aggfn.dobj.name);
- agginfo[i].aggfn.lang = InvalidOid; /* not currently interesting */
+ agginfo[i].aggfn.lang = InvalidOid; /* not currently
+ * interesting */
agginfo[i].aggfn.nargs = 1;
agginfo[i].aggfn.argtypes = (Oid *) malloc(sizeof(Oid));
agginfo[i].aggfn.argtypes[0] = atooid(PQgetvalue(res, i, i_aggbasetype));
- agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
+ agginfo[i].aggfn.prorettype = InvalidOid; /* not saved */
agginfo[i].aggfn.proacl = strdup(PQgetvalue(res, i, i_aggacl));
agginfo[i].anybasetype = false; /* computed when it's dumped */
agginfo[i].fmtbasetype = NULL; /* computed when it's dumped */
AssignDumpId(&finfo[i].dobj);
finfo[i].dobj.name = strdup(PQgetvalue(res, i, i_proname));
finfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_pronamespace)),
- finfo[i].dobj.catId.oid);
+ finfo[i].dobj.catId.oid);
finfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
finfo[i].lang = atooid(PQgetvalue(res, i, i_prolang));
finfo[i].prorettype = atooid(PQgetvalue(res, i, i_prorettype));
else if (g_fout->remoteVersion >= 70200)
{
appendPQExpBuffer(query,
- "SELECT tableoid, oid, relname, relacl, relkind, "
+ "SELECT tableoid, oid, relname, relacl, relkind, "
"0::oid as relnamespace, "
"(select usename from pg_user where relowner = usesysid) as usename, "
"relchecks, reltriggers, "
{
/* all tables have oids in 7.1 */
appendPQExpBuffer(query,
- "SELECT tableoid, oid, relname, relacl, relkind, "
+ "SELECT tableoid, oid, relname, relacl, relkind, "
"0::oid as relnamespace, "
"(select usename from pg_user where relowner = usesysid) as usename, "
"relchecks, reltriggers, "
AssignDumpId(&tblinfo[i].dobj);
tblinfo[i].dobj.name = strdup(PQgetvalue(res, i, i_relname));
tblinfo[i].dobj.namespace = findNamespace(atooid(PQgetvalue(res, i, i_relnamespace)),
- tblinfo[i].dobj.catId.oid);
+ tblinfo[i].dobj.catId.oid);
tblinfo[i].usename = strdup(PQgetvalue(res, i, i_usename));
tblinfo[i].relacl = strdup(PQgetvalue(res, i, i_relacl));
tblinfo[i].relkind = *(PQgetvalue(res, i, i_relkind));
resetPQExpBuffer(lockquery);
appendPQExpBuffer(lockquery,
"LOCK TABLE %s IN ACCESS SHARE MODE",
- fmtQualifiedId(tblinfo[i].dobj.namespace->dobj.name,
- tblinfo[i].dobj.name));
+ fmtQualifiedId(tblinfo[i].dobj.namespace->dobj.name,
+ tblinfo[i].dobj.name));
do_sql_command(g_conn, lockquery->data);
}
/*
* If the user is attempting to dump a specific table, check to ensure
- * that the specified table actually exists. (This is a bit simplistic
- * since we don't fully check the combination of -n and -t switches.)
+ * that the specified table actually exists. (This is a bit
+ * simplistic since we don't fully check the combination of -n and -t
+ * switches.)
*/
if (selectTableName)
{
/*
* The point of the messy-looking outer join is to find a
* constraint that is related by an internal dependency link to
- * the index. If we find one, create a CONSTRAINT entry linked
- * to the INDEX entry. We assume an index won't have more than
- * one internal dependency.
+ * the index. If we find one, create a CONSTRAINT entry linked to
+ * the INDEX entry. We assume an index won't have more than one
+ * internal dependency.
*/
resetPQExpBuffer(query);
if (g_fout->remoteVersion >= 80000)
"c.contype, c.conname, "
"c.tableoid as contableoid, "
"c.oid as conoid, "
- "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) as tablespace "
+ "(SELECT spcname FROM pg_catalog.pg_tablespace s WHERE s.oid = t.reltablespace) as tablespace "
"FROM pg_catalog.pg_index i "
"JOIN pg_catalog.pg_class t ON (t.oid = i.indexrelid) "
"LEFT JOIN pg_catalog.pg_depend d "
appendPQExpBuffer(query,
"SELECT t.tableoid, t.oid, "
"t.relname as indexname, "
- "pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, false as indisclustered, "
"CASE WHEN i.indisprimary THEN 'p'::char "
"(SELECT oid FROM pg_class WHERE relname = 'pg_class') AS tableoid, "
"t.oid, "
"t.relname as indexname, "
- "pg_get_indexdef(i.indexrelid) as indexdef, "
+ "pg_get_indexdef(i.indexrelid) as indexdef, "
"t.relnatts as indnkeys, "
"i.indkey, false as indisclustered, "
"CASE WHEN i.indisprimary THEN 'p'::char "
/*
* In pre-7.4 releases, indkeys may contain more entries than
* indnkeys says (since indnkeys will be 1 for a functional
- * index). We don't actually care about this case since we don't
- * examine indkeys except for indexes associated with PRIMARY
- * and UNIQUE constraints, which are never functional indexes.
- * But we have to allocate enough space to keep parseOidArray
- * from complaining.
+ * index). We don't actually care about this case since we
+ * don't examine indkeys except for indexes associated with
+ * PRIMARY and UNIQUE constraints, which are never functional
+ * indexes. But we have to allocate enough space to keep
+ * parseOidArray from complaining.
*/
indxinfo[j].indkeys = (Oid *) malloc(INDEX_MAX_KEYS * sizeof(Oid));
parseOidArray(PQgetvalue(res, j, i_indkey),
resetPQExpBuffer(query);
appendPQExpBuffer(query,
"SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) as condef "
+ "pg_catalog.pg_get_constraintdef(oid) as condef "
"FROM pg_catalog.pg_constraint "
"WHERE conrelid = '%u'::pg_catalog.oid "
"AND contype = 'f'",
return;
/*
- * select appropriate schema to ensure names in constraint are properly
- * qualified
+ * select appropriate schema to ensure names in constraint are
+ * properly qualified
*/
selectSourceSchema(tinfo->dobj.namespace->dobj.name);
if (g_fout->remoteVersion >= 70400)
appendPQExpBuffer(query, "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc "
"FROM pg_catalog.pg_constraint "
"WHERE contypid = '%u'::pg_catalog.oid "
"ORDER BY conname",
constrinfo[i].conindex = 0;
constrinfo[i].coninherited = false;
constrinfo[i].separate = false;
+
/*
- * Make the domain depend on the constraint, ensuring it won't
- * be output till any constraint dependencies are OK.
+ * Make the domain depend on the constraint, ensuring it won't be
+ * output till any constraint dependencies are OK.
*/
addObjectDependency(&tinfo->dobj,
constrinfo[i].dobj.dumpId);
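
[Editor's note] Registering the domain-on-constraint dependency is what later steers the topological sort in pg_dump_sort.c further down in this patch. A minimal sketch of what such a registration amounts to, assuming a growable per-object array of referenced dump IDs; the struct and helper are simplified stand-ins, not the real common.c code.

    #include <stdlib.h>

    typedef int DumpId;

    typedef struct SketchObject
    {
        DumpId      dumpId;
        DumpId     *dependencies;   /* dump IDs this object must follow */
        int         nDeps;
        int         allocDeps;
    } SketchObject;

    /* Append refId to obj's dependency list, growing the array as needed. */
    static void
    sketchAddDependency(SketchObject *obj, DumpId refId)
    {
        if (obj->nDeps >= obj->allocDeps)
        {
            obj->allocDeps = (obj->allocDeps > 0) ? obj->allocDeps * 2 : 16;
            obj->dependencies = (DumpId *)
                realloc(obj->dependencies, obj->allocDeps * sizeof(DumpId));
            if (obj->dependencies == NULL)
                abort();            /* out of memory */
        }
        obj->dependencies[obj->nDeps++] = refId;
    }

    int
    main(void)
    {
        SketchObject domain = {1, NULL, 0, 0};

        /* "domain 1 must be dumped after constraint 2", as in the hunk above */
        sketchAddDependency(&domain, 2);
        return (domain.nDeps == 1) ? 0 : 1;
    }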
for (i = 0; i < ntups; i++)
{
- Oid ruletableoid;
+ Oid ruletableoid;
ruleinfo[i].dobj.objType = DO_RULE;
ruleinfo[i].dobj.catId.tableoid = atooid(PQgetvalue(res, i, i_tableoid));
if (ruleinfo[i].ruletable)
{
/*
- * If the table is a view, force its ON SELECT rule to be sorted
- * before the view itself --- this ensures that any dependencies
- * for the rule affect the table's positioning. Other rules
- * are forced to appear after their table.
+ * If the table is a view, force its ON SELECT rule to be
+ * sorted before the view itself --- this ensures that any
+ * dependencies for the rule affect the table's positioning.
+ * Other rules are forced to appear after their table.
*/
if (ruleinfo[i].ruletable->relkind == RELKIND_VIEW &&
ruleinfo[i].ev_type == '1' && ruleinfo[i].is_instead)
"tgfoid::pg_catalog.regproc as tgfname, "
"tgtype, tgnargs, tgargs, "
"tgisconstraint, tgconstrname, tgdeferrable, "
- "tgconstrrelid, tginitdeferred, tableoid, oid, "
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
"tgconstrrelid::pg_catalog.regclass as tgconstrrelname "
"from pg_catalog.pg_trigger t "
"where tgrelid = '%u'::pg_catalog.oid "
"SELECT tgname, tgfoid::regproc as tgfname, "
"tgtype, tgnargs, tgargs, "
"tgisconstraint, tgconstrname, tgdeferrable, "
- "tgconstrrelid, tginitdeferred, tableoid, oid, "
+ "tgconstrrelid, tginitdeferred, tableoid, oid, "
"(select relname from pg_class where oid = tgconstrrelid) "
" as tgconstrrelname "
"from pg_trigger "
planginfo[i].lanvalidator = InvalidOid;
planginfo[i].lanacl = strdup("{=U}");
+
/*
- * We need to make a dependency to ensure the function will
- * be dumped first. (In 7.3 and later the regular dependency
+ * We need to make a dependency to ensure the function will be
+ * dumped first. (In 7.3 and later the regular dependency
* mechanism will handle this for us.)
*/
funcInfo = findFuncByOid(planginfo[i].lanplcallfoid);
"FROM pg_type t1, pg_type t2, pg_proc p "
"WHERE p.pronargs = 1 AND "
"p.proargtypes[0] = t1.oid AND "
- "p.prorettype = t2.oid AND p.proname = t2.typname "
+ "p.prorettype = t2.oid AND p.proname = t2.typname "
"ORDER BY 3,4");
}
for (i = 0; i < ntups; i++)
{
- PQExpBufferData namebuf;
+ PQExpBufferData namebuf;
TypeInfo *sTypeInfo;
TypeInfo *tTypeInfo;
if (OidIsValid(castinfo[i].castfunc))
{
/*
- * We need to make a dependency to ensure the function will
- * be dumped first. (In 7.3 and later the regular dependency
+ * We need to make a dependency to ensure the function will be
+ * dumped first. (In 7.3 and later the regular dependency
* mechanism will handle this for us.)
*/
FuncInfo *funcInfo;
tbinfo->attislocal[j] = (PQgetvalue(res, j, i_attislocal)[0] == 't');
tbinfo->attisserial[j] = false; /* fix below */
tbinfo->notnull[j] = (PQgetvalue(res, j, i_attnotnull)[0] == 't');
- tbinfo->attrdefs[j] = NULL; /* fix below */
+ tbinfo->attrdefs[j] = NULL; /* fix below */
if (PQgetvalue(res, j, i_atthasdef)[0] == 't')
hasdefaults = true;
/* these flags will be set in flagInhAttrs() */
for (j = 0; j < numDefaults; j++)
{
- int adnum;
+ int adnum;
attrdefs[j].dobj.objType = DO_ATTRDEF;
attrdefs[j].dobj.catId.tableoid = atooid(PQgetvalue(res, j, 0));
/*
* Defaults on a VIEW must always be dumped as separate
* ALTER TABLE commands. Defaults on regular tables are
- * dumped as part of the CREATE TABLE if possible. To check
- * if it's safe, we mark the default as needing to appear
- * before the CREATE.
+ * dumped as part of the CREATE TABLE if possible. To
+ * check if it's safe, we mark the default as needing to
+ * appear before the CREATE.
*/
if (tbinfo->relkind == RELKIND_VIEW)
{
if (g_fout->remoteVersion >= 70400)
{
appendPQExpBuffer(q, "SELECT tableoid, oid, conname, "
- "pg_catalog.pg_get_constraintdef(oid) AS consrc "
+ "pg_catalog.pg_get_constraintdef(oid) AS consrc "
"FROM pg_catalog.pg_constraint "
"WHERE conrelid = '%u'::pg_catalog.oid "
" AND contype = 'c' "
constrs[j].separate = false;
addObjectDependency(&tbinfo->dobj,
constrs[j].dobj.dumpId);
+
/*
* If the constraint is inherited, this will be detected
* later. We also detect later if the constraint must be
ArchiveEntry(fout, nilCatalogId, createDumpId(),
target->data,
- tbinfo->dobj.namespace->dobj.name, tbinfo->usename,
+ tbinfo->dobj.namespace->dobj.name, tbinfo->usename,
false, "COMMENT", query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
ArchiveEntry(fout, nilCatalogId, createDumpId(),
target->data,
- tbinfo->dobj.namespace->dobj.name, tbinfo->usename,
+ tbinfo->dobj.namespace->dobj.name, tbinfo->usename,
false, "COMMENT", query->data, "", NULL,
&(tbinfo->dobj.dumpId), 1,
NULL, NULL);
ncomments = collectComments(fout, &comments);
/*
- * Pre-7.2, pg_description does not contain classoid, so collectComments
- * just stores a zero. If there's a collision on object OID, well, you
- * get duplicate comments.
+ * Pre-7.2, pg_description does not contain classoid, so
+ * collectComments just stores a zero. If there's a collision on
+ * object OID, well, you get duplicate comments.
*/
if (fout->remoteVersion < 70200)
classoid = 0;
* Do binary search to find some item matching the object.
*/
low = &comments[0];
- high = &comments[ncomments-1];
+ high = &comments[ncomments - 1];
while (low <= high)
{
middle = low + (high - low) / 2;
qnspname = strdup(fmtId(nspinfo->dobj.name));
/*
- * Note that ownership is shown in the AUTHORIZATION clause,
- * while the archive entry is listed with empty owner (causing
- * it to be emitted with SET SESSION AUTHORIZATION DEFAULT).
- * This seems the best way of dealing with schemas owned by
- * users without CREATE SCHEMA privilege. Further hacking has
- * to be applied for --no-owner mode, though!
+ * Note that ownership is shown in the AUTHORIZATION clause, while the
+ * archive entry is listed with empty owner (causing it to be emitted
+ * with SET SESSION AUTHORIZATION DEFAULT). This seems the best way of
+ * dealing with schemas owned by users without CREATE SCHEMA
+ * privilege. Further hacking has to be applied for --no-owner mode,
+ * though!
*/
appendPQExpBuffer(delq, "DROP SCHEMA %s;\n", qnspname);
if (!domcheck->separate)
appendPQExpBuffer(q, "\n\tCONSTRAINT %s %s",
- fmtId(domcheck->dobj.name), domcheck->condef);
+ fmtId(domcheck->dobj.name), domcheck->condef);
}
appendPQExpBuffer(q, ";\n");
return;
/*
- * Current theory is to dump PLs iff their underlying functions
- * will be dumped (are in a dumpable namespace, or have a
- * non-system OID in pre-7.3 databases). Actually, we treat the
- * PL itself as being in the underlying function's namespace,
- * though it isn't really. This avoids searchpath problems for
- * the HANDLER clause.
+ * Current theory is to dump PLs iff their underlying functions will
+ * be dumped (are in a dumpable namespace, or have a non-system OID in
+ * pre-7.3 databases). Actually, we treat the PL itself as being in
+ * the underlying function's namespace, though it isn't really. This
+ * avoids searchpath problems for the HANDLER clause.
*
- * If the underlying function is in the pg_catalog namespace,
- * we won't have loaded it into finfo[] at all; therefore,
- * treat failure to find it in finfo[] as indicating we shouldn't
- * dump it, not as an error condition. Ditto for the validator.
+ * If the underlying function is in the pg_catalog namespace, we won't
+ * have loaded it into finfo[] at all; therefore, treat failure to
+ * find it in finfo[] as indicating we shouldn't dump it, not as an
+ * error condition. Ditto for the validator.
*/
funcInfo = findFuncByOid(plang->lanplcallfoid);
/* Cope with possibility that validator is in different schema */
if (validatorInfo->dobj.namespace != funcInfo->dobj.namespace)
appendPQExpBuffer(defqry, "%s.",
- fmtId(validatorInfo->dobj.namespace->dobj.name));
+ fmtId(validatorInfo->dobj.namespace->dobj.name));
appendPQExpBuffer(defqry, "%s",
fmtId(validatorInfo->dobj.name));
}
if (strcmp(prosrc, "-") != 0)
{
appendPQExpBuffer(asPart, ", ");
+
/*
* where we have bin, use dollar quoting if allowed and src
* contains quote or backslash; else use regular quoting.
if (proargnames && *proargnames)
{
- int nitems = 0;
+ int nitems = 0;
if (!parsePGArray(proargnames, &argnamearray, &nitems) ||
nitems != finfo->nargs)
* As per discussion we dump casts if one or more of the underlying
* objects (the conversion function and the two data types) are not
* builtin AND if all of the non-builtin objects namespaces are
- * included in the dump. Builtin meaning, the namespace name does
- * not start with "pg_".
+ * included in the dump. Builtin meaning, the namespace name does not
+ * start with "pg_".
*/
sourceInfo = findTypeByOid(cast->castsource);
targetInfo = findTypeByOid(cast->casttarget);
return;
/*
- * Skip cast if function isn't from pg_ and that namespace is
- * not dumped.
+ * Skip cast if function isn't from pg_ and that namespace is not
+ * dumped.
*/
if (funcInfo &&
strncmp(funcInfo->dobj.namespace->dobj.name, "pg_", 3) != 0 &&
else
{
/*
- * Always qualify the function name, in case it is not in pg_catalog
- * schema (format_function_signature won't qualify it).
+ * Always qualify the function name, in case it is not in
+ * pg_catalog schema (format_function_signature won't qualify it).
*/
appendPQExpBuffer(defqry, "WITH FUNCTION %s.",
fmtId(funcInfo->dobj.namespace->dobj.name));
appendPQExpBuffer(defqry, "%s",
- format_function_signature(funcInfo, NULL, true));
+ format_function_signature(funcInfo, NULL, true));
}
if (cast->castcontext == 'a')
static const char *
convertOperatorReference(const char *opr)
{
- OprInfo *oprInfo;
+ OprInfo *oprInfo;
/* In all cases "0" means a null reference */
if (strcmp(opr, "0") == 0)
/* Get conversion-specific details */
appendPQExpBuffer(query, "SELECT conname, "
- "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
- "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
+ "pg_catalog.pg_encoding_to_char(conforencoding) AS conforencoding, "
+ "pg_catalog.pg_encoding_to_char(contoencoding) AS contoencoding, "
"conproc, condefault "
"FROM pg_catalog.pg_conversion c "
"WHERE c.oid = '%u'::pg_catalog.oid",
fmtId(convinfo->dobj.name));
appendPQExpBuffer(q, "CREATE %sCONVERSION %s FOR ",
- (condefault) ? "DEFAULT " : "",
- fmtId(convinfo->dobj.name));
+ (condefault) ? "DEFAULT " : "",
+ fmtId(convinfo->dobj.name));
appendStringLiteral(q, conforencoding, true);
appendPQExpBuffer(q, " TO ");
appendStringLiteral(q, contoencoding, true);
ArchiveEntry(fout, agginfo->aggfn.dobj.catId, agginfo->aggfn.dobj.dumpId,
aggsig_tag,
- agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.usename,
+ agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.usename,
false, "AGGREGATE", q->data, delq->data, NULL,
- agginfo->aggfn.dobj.dependencies, agginfo->aggfn.dobj.nDeps,
+ agginfo->aggfn.dobj.dependencies, agginfo->aggfn.dobj.nDeps,
NULL, NULL);
/* Dump Aggregate Comments */
resetPQExpBuffer(q);
appendPQExpBuffer(q, "AGGREGATE %s", aggsig);
dumpComment(fout, q->data,
- agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.usename,
+ agginfo->aggfn.dobj.namespace->dobj.name, agginfo->aggfn.usename,
agginfo->aggfn.dobj.catId, 0, agginfo->aggfn.dobj.dumpId);
/*
* Since there is no GRANT ON AGGREGATE syntax, we have to make the
- * ACL command look like a function's GRANT; in particular this affects
- * the syntax for aggregates on ANY.
+ * ACL command look like a function's GRANT; in particular this
+ * affects the syntax for aggregates on ANY.
*/
free(aggsig);
free(aggsig_tag);
}
/*
- * Default value --- suppress if inherited, serial,
- * or to be printed separately.
+ * Default value --- suppress if inherited, serial, or to
+ * be printed separately.
*/
if (tbinfo->attrdefs[j] != NULL &&
!tbinfo->inhAttrDef[j] &&
appendPQExpBuffer(q, ", ");
if (parentRel->dobj.namespace != tbinfo->dobj.namespace)
appendPQExpBuffer(q, "%s.",
- fmtId(parentRel->dobj.namespace->dobj.name));
+ fmtId(parentRel->dobj.namespace->dobj.name));
appendPQExpBuffer(q, "%s",
fmtId(parentRel->dobj.name));
}
ArchiveEntry(fout, tbinfo->dobj.catId, tbinfo->dobj.dumpId,
tbinfo->dobj.name,
tbinfo->dobj.namespace->dobj.name, tbinfo->usename,
- (strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
+ (strcmp(reltypename, "TABLE") == 0) ? tbinfo->hasoids : false,
reltypename, q->data, delq->data, NULL,
tbinfo->dobj.dependencies, tbinfo->dobj.nDeps,
NULL, NULL);
return;
/* Don't print inherited or serial defaults, either */
- if (tbinfo->inhAttrDef[adnum-1] || tbinfo->attisserial[adnum-1])
+ if (tbinfo->inhAttrDef[adnum - 1] || tbinfo->attisserial[adnum - 1])
return;
q = createPQExpBuffer();
adinfo->adef_expr);
/*
- * DROP must be fully qualified in case same name appears
- * in pg_catalog
+ * DROP must be fully qualified in case same name appears in
+ * pg_catalog
*/
appendPQExpBuffer(delq, "ALTER TABLE %s.",
fmtId(tbinfo->dobj.namespace->dobj.name));
}
/*
- * DROP must be fully qualified in case same name appears
- * in pg_catalog
+ * DROP must be fully qualified in case same name appears in
+ * pg_catalog
*/
appendPQExpBuffer(delq, "DROP INDEX %s.",
fmtId(tbinfo->dobj.namespace->dobj.name));
fmtId(tbinfo->dobj.name));
appendPQExpBuffer(q, " ADD CONSTRAINT %s %s (",
fmtId(coninfo->dobj.name),
- coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
+ coninfo->contype == 'p' ? "PRIMARY KEY" : "UNIQUE");
for (k = 0; k < indxinfo->indnkeys; k++)
{
}
/*
- * DROP must be fully qualified in case same name appears
- * in pg_catalog
+ * DROP must be fully qualified in case same name appears in
+ * pg_catalog
*/
appendPQExpBuffer(delq, "ALTER TABLE ONLY %s.",
fmtId(tbinfo->dobj.namespace->dobj.name));
* find the last built in oid
*
* For 7.0, we do this by assuming that the last thing that initdb does is to
- * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
+ * create the pg_indexes view. This sucks in general, but seeing that 7.0.x
* initdb won't be changing anymore, it'll do.
*/
static Oid
res = PQexec(g_conn,
"SELECT oid FROM pg_class WHERE relname = 'pg_indexes'");
check_sql_result(res, g_conn,
- "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'",
+ "SELECT oid FROM pg_class WHERE relname = 'pg_indexes'",
PGRES_TUPLES_OK);
ntups = PQntuples(res);
if (ntups < 1)
if (!schemaOnly)
{
- TableInfo *owning_tab;
+ TableInfo *owning_tab;
resetPQExpBuffer(query);
appendPQExpBuffer(query, "SELECT pg_catalog.setval(");
+
/*
- * If this is a SERIAL sequence, then use the pg_get_serial_sequence
- * function to avoid hard-coding the sequence name. Note that this
- * implicitly assumes that the sequence and its owning table are in
- * the same schema, because we don't schema-qualify the reference.
+ * If this is a SERIAL sequence, then use the
+ * pg_get_serial_sequence function to avoid hard-coding the
+ * sequence name. Note that this implicitly assumes that the
+ * sequence and its owning table are in the same schema, because
+ * we don't schema-qualify the reference.
*/
if (OidIsValid(tbinfo->owning_tab) &&
(owning_tab = findTableByOid(tbinfo->owning_tab)) != NULL)
appendPQExpBuffer(query, "pg_catalog.pg_get_serial_sequence(");
appendStringLiteral(query, fmtId(owning_tab->dobj.name), true);
appendPQExpBuffer(query, ", ");
- appendStringLiteral(query, owning_tab->attnames[tbinfo->owning_col-1], true);
+ appendStringLiteral(query, owning_tab->attnames[tbinfo->owning_col - 1], true);
appendPQExpBuffer(query, ")");
}
else
return;
/*
- * If it is an ON SELECT rule, we do not need to dump it because
- * it will be handled via CREATE VIEW for the table.
+ * If it is an ON SELECT rule, we do not need to dump it because it
+ * will be handled via CREATE VIEW for the table.
*/
if (rinfo->ev_type == '1' && rinfo->is_instead)
return;
dobj = findObjectByCatalogId(objId);
/*
- * Failure to find objects mentioned in pg_depend is not unexpected,
- * since for example we don't collect info about TOAST tables.
+ * Failure to find objects mentioned in pg_depend is not
+ * unexpected, since for example we don't collect info about TOAST
+ * tables.
*/
if (dobj == NULL)
{
myFormatType(const char *typname, int32 typmod)
{
char *result;
- bool isarray = false;
+ bool isarray = false;
PQExpBuffer buf = createPQExpBuffer();
/* Handle array types */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.111 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.h,v 1.112 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
Oid oid;
} CatalogId;
-typedef int DumpId;
+typedef int DumpId;
/*
typedef struct _typeInfo
{
DumpableObject dobj;
+
/*
* Note: dobj.name is the pg_type.typname entry. format_type() might
* produce something different than typname
bool *attisserial; /* true if attr is serial or bigserial */
/*
- * Note: we need to store per-attribute notnull, default, and constraint
- * stuff for all interesting tables so that we can tell which constraints
- * were inherited.
+ * Note: we need to store per-attribute notnull, default, and
+ * constraint stuff for all interesting tables so that we can tell
+ * which constraints were inherited.
*/
bool *notnull; /* Not null constraints on attributes */
- struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
+ struct _attrDefInfo **attrdefs; /* DEFAULT expressions */
bool *inhAttrs; /* true if each attribute is inherited */
bool *inhAttrDef; /* true if attr's default is inherited */
bool *inhNotNull; /* true if NOT NULL is inherited */
- struct _constraintInfo *checkexprs; /* CHECK constraints */
+ struct _constraintInfo *checkexprs; /* CHECK constraints */
/*
* Stuff computed only for dumpable tables.
*/
int numParents; /* number of (immediate) parent tables */
- struct _tableInfo **parents; /* TableInfos of immediate parents */
+ struct _tableInfo **parents; /* TableInfos of immediate parents */
} TableInfo;
typedef struct _attrDefInfo
} TriggerInfo;
/*
- * struct ConstraintInfo is used for all constraint types. However we
+ * struct ConstraintInfo is used for all constraint types. However we
* use a different objType for foreign key constraints, to make it easier
* to sort them the way we want.
*/
*/
extern TableInfo *getSchemaData(int *numTablesPtr,
- const bool schemaOnly,
- const bool dataOnly);
+ const bool schemaOnly,
+ const bool dataOnly);
typedef enum _OidOptions
{
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.5 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump_sort.c,v 1.6 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Sort priority for object types when dumping a pre-7.3 database.
* Objects are sorted by priority levels, and within an equal priority level
- * by OID. (This is a relatively crude hack to provide semi-reasonable
+ * by OID. (This is a relatively crude hack to provide semi-reasonable
* behavior for old databases without full dependency info.)
*/
static const int oldObjectTypePriority[] =
{
- 1, /* DO_NAMESPACE */
- 2, /* DO_TYPE */
- 2, /* DO_FUNC */
- 2, /* DO_AGG */
- 3, /* DO_OPERATOR */
- 4, /* DO_OPCLASS */
- 5, /* DO_CONVERSION */
- 6, /* DO_TABLE */
- 8, /* DO_ATTRDEF */
- 12, /* DO_INDEX */
- 13, /* DO_RULE */
- 14, /* DO_TRIGGER */
- 11, /* DO_CONSTRAINT */
- 15, /* DO_FK_CONSTRAINT */
- 2, /* DO_PROCLANG */
- 2, /* DO_CAST */
- 9, /* DO_TABLE_DATA */
- 7, /* DO_TABLE_TYPE */
- 10 /* DO_BLOBS */
+ 1, /* DO_NAMESPACE */
+ 2, /* DO_TYPE */
+ 2, /* DO_FUNC */
+ 2, /* DO_AGG */
+ 3, /* DO_OPERATOR */
+ 4, /* DO_OPCLASS */
+ 5, /* DO_CONVERSION */
+ 6, /* DO_TABLE */
+ 8, /* DO_ATTRDEF */
+ 12, /* DO_INDEX */
+ 13, /* DO_RULE */
+ 14, /* DO_TRIGGER */
+ 11, /* DO_CONSTRAINT */
+ 15, /* DO_FK_CONSTRAINT */
+ 2, /* DO_PROCLANG */
+ 2, /* DO_CAST */
+ 9, /* DO_TABLE_DATA */
+ 7, /* DO_TABLE_TYPE */
+ 10 /* DO_BLOBS */
};
/*
*/
static const int newObjectTypePriority[] =
{
- 1, /* DO_NAMESPACE */
- 3, /* DO_TYPE */
- 4, /* DO_FUNC */
- 5, /* DO_AGG */
- 6, /* DO_OPERATOR */
- 7, /* DO_OPCLASS */
- 9, /* DO_CONVERSION */
- 10, /* DO_TABLE */
- 12, /* DO_ATTRDEF */
- 16, /* DO_INDEX */
- 17, /* DO_RULE */
- 18, /* DO_TRIGGER */
- 15, /* DO_CONSTRAINT */
- 19, /* DO_FK_CONSTRAINT */
- 2, /* DO_PROCLANG */
- 8, /* DO_CAST */
- 13, /* DO_TABLE_DATA */
- 11, /* DO_TABLE_TYPE */
- 14 /* DO_BLOBS */
+ 1, /* DO_NAMESPACE */
+ 3, /* DO_TYPE */
+ 4, /* DO_FUNC */
+ 5, /* DO_AGG */
+ 6, /* DO_OPERATOR */
+ 7, /* DO_OPCLASS */
+ 9, /* DO_CONVERSION */
+ 10, /* DO_TABLE */
+ 12, /* DO_ATTRDEF */
+ 16, /* DO_INDEX */
+ 17, /* DO_RULE */
+ 18, /* DO_TRIGGER */
+ 15, /* DO_CONSTRAINT */
+ 19, /* DO_FK_CONSTRAINT */
+ 2, /* DO_PROCLANG */
+ 8, /* DO_CAST */
+ 13, /* DO_TABLE_DATA */
+ 11, /* DO_TABLE_TYPE */
+ 14 /* DO_BLOBS */
};
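
[Editor's note] Both tables carry one priority slot per dumpable-object type, so a comparator only needs to look the two priorities up and fall back to a secondary key when they tie. A minimal sketch of that pattern, reusing four entries from the new table above; the cut-down enum and the qsort driver are illustrative, and the real comparators also break ties on names, namespaces, or dump IDs.

    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down object-type enum; priorities copied from the new table above. */
    typedef enum
    {
        SK_NAMESPACE,               /* priority 1 */
        SK_PROCLANG,                /* priority 2 */
        SK_TYPE,                    /* priority 3 */
        SK_TABLE                    /* priority 10 */
    } SketchType;

    static const int sketchPriority[] = {1, 2, 3, 10};

    typedef struct
    {
        SketchType  objType;
        unsigned int oid;           /* tie-breaker, as in the pre-7.3 path */
    } SketchObj;

    static int
    sketchCompare(const void *p1, const void *p2)
    {
        const SketchObj *o1 = (const SketchObj *) p1;
        const SketchObj *o2 = (const SketchObj *) p2;
        int     cmp = sketchPriority[o1->objType] - sketchPriority[o2->objType];

        if (cmp != 0)
            return cmp;
        return (o1->oid > o2->oid) - (o1->oid < o2->oid);
    }

    int
    main(void)
    {
        SketchObj   objs[] = {{SK_TABLE, 4}, {SK_TYPE, 9}, {SK_NAMESPACE, 7}, {SK_TYPE, 3}};
        int         i;

        qsort(objs, 4, sizeof(SketchObj), sketchCompare);
        for (i = 0; i < 4; i++)
            printf("type %d oid %u\n", objs[i].objType, objs[i].oid);
        /* namespace first, then the two types in OID order, then the table */
        return 0;
    }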
static int DOTypeNameCompare(const void *p1, const void *p2);
static int DOTypeOidCompare(const void *p1, const void *p2);
static bool TopoSort(DumpableObject **objs,
- int numObjs,
- DumpableObject **ordering,
- int *nOrdering);
+ int numObjs,
+ DumpableObject **ordering,
+ int *nOrdering);
static void addHeapElement(int val, int *heap, int heapLength);
static int removeHeapElement(int *heap, int heapLength);
static void findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs);
static bool findLoop(DumpableObject *obj,
- DumpId startPoint,
- DumpableObject **workspace,
- int depth,
- int *newDepth);
+ DumpId startPoint,
+ DumpableObject **workspace,
+ int depth,
+ int *newDepth);
static void repairDependencyLoop(DumpableObject **loop,
- int nLoop);
+ int nLoop);
static void describeDumpableObject(DumpableObject *obj,
- char *buf, int bufsize);
+ char *buf, int bufsize);
/*
void
sortDumpableObjects(DumpableObject **objs, int numObjs)
{
- DumpableObject **ordering;
+ DumpableObject **ordering;
int nOrdering;
if (numObjs <= 0)
* TopoSort -- topological sort of a dump list
*
* Generate a re-ordering of the dump list that satisfies all the dependency
- * constraints shown in the dump list. (Each such constraint is a fact of a
+ * constraints shown in the dump list. (Each such constraint is a fact of a
* partial ordering.) Minimize rearrangement of the list not needed to
* achieve the partial ordering.
*
- * The input is the list of numObjs objects in objs[]. This list is not
+ * The input is the list of numObjs objects in objs[]. This list is not
* modified.
*
* Returns TRUE if able to build an ordering that satisfies all the
TopoSort(DumpableObject **objs,
int numObjs,
DumpableObject **ordering, /* output argument */
- int *nOrdering) /* output argument */
+ int *nOrdering) /* output argument */
{
DumpId maxDumpId = getMaxDumpId();
int *pendingHeap;
int *beforeConstraints;
int *idMap;
- DumpableObject *obj;
+ DumpableObject *obj;
int heapLength;
int i,
j,
k;
/*
- * This is basically the same algorithm shown for topological sorting in
- * Knuth's Volume 1. However, we would like to minimize unnecessary
- * rearrangement of the input ordering; that is, when we have a choice
- * of which item to output next, we always want to take the one highest
- * in the original list. Therefore, instead of maintaining an unordered
- * linked list of items-ready-to-output as Knuth does, we maintain a heap
- * of their item numbers, which we can use as a priority queue. This
- * turns the algorithm from O(N) to O(N log N) because each insertion or
- * removal of a heap item takes O(log N) time. However, that's still
- * plenty fast enough for this application.
+ * This is basically the same algorithm shown for topological sorting
+ * in Knuth's Volume 1. However, we would like to minimize
+ * unnecessary rearrangement of the input ordering; that is, when we
+ * have a choice of which item to output next, we always want to take
+ * the one highest in the original list. Therefore, instead of
+ * maintaining an unordered linked list of items-ready-to-output as
+ * Knuth does, we maintain a heap of their item numbers, which we can
+ * use as a priority queue. This turns the algorithm from O(N) to O(N
+ * log N) because each insertion or removal of a heap item takes O(log
+ * N) time. However, that's still plenty fast enough for this
+ * application.
*/
- *nOrdering = numObjs; /* for success return */
+ *nOrdering = numObjs; /* for success return */
/* Eliminate the null case */
if (numObjs <= 0)
/*
* Scan the constraints, and for each item in the input, generate a
* count of the number of constraints that say it must be before
- * something else. The count for the item with dumpId j is
- * stored in beforeConstraints[j]. We also make a map showing the
- * input-order index of the item with dumpId j.
+ * something else. The count for the item with dumpId j is stored in
+ * beforeConstraints[j]. We also make a map showing the input-order
+ * index of the item with dumpId j.
*/
beforeConstraints = (int *) malloc((maxDumpId + 1) * sizeof(int));
if (beforeConstraints == NULL)
* the indexes of items that already have beforeConstraints[id] == 0.
*
* The essential property of a heap is heap[(j-1)/2] >= heap[j] for each
- * j in the range 1..heapLength-1 (note we are using 0-based subscripts
- * here, while the discussion in Knuth assumes 1-based subscripts).
- * So, if we simply enter the indexes into pendingHeap[] in decreasing
- * order, we a-fortiori have the heap invariant satisfied at completion
- * of this loop, and don't need to do any sift-up comparisons.
+ * j in the range 1..heapLength-1 (note we are using 0-based
+ * subscripts here, while the discussion in Knuth assumes 1-based
+ * subscripts). So, if we simply enter the indexes into pendingHeap[]
+ * in decreasing order, we a-fortiori have the heap invariant
+ * satisfied at completion of this loop, and don't need to do any
+ * sift-up comparisons.
*/
heapLength = 0;
- for (i = numObjs; --i >= 0; )
+ for (i = numObjs; --i >= 0;)
{
if (beforeConstraints[objs[i]->dumpId] == 0)
pendingHeap[heapLength++] = i;
}
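
[Editor's note] The two reflowed comments above describe the whole scheme: count the before-constraints per object, seed a priority heap with every object whose count is already zero (filling it in decreasing index order so the heap invariant holds with no sift-ups), then repeatedly pop the highest ready index, place it at the end of the output, and decrement the counts of the objects it depends on. A compact standalone sketch of that scheme over a toy dependency list; the array sizes and example edges are invented.

    #include <stdio.h>

    #define NOBJ 5

    /*
     * Toy graph, invented for the example: deps[i] lists what object i depends
     * on (i.e. must be dumped after).  Here 2 needs 0, 3 needs 0 and 1, 4 needs 2.
     */
    static const int deps[NOBJ][NOBJ] = {{0}, {0}, {0}, {1, 0}, {2}};
    static const int ndeps[NOBJ] = {0, 0, 1, 2, 1};

    /* Standard sift-up insertion into a max-heap of item indexes. */
    static void
    heapAdd(int val, int *heap, int len)
    {
        int     j = len;

        heap[len] = val;
        while (j > 0 && heap[(j - 1) / 2] < heap[j])
        {
            int     tmp = heap[j];

            heap[j] = heap[(j - 1) / 2];
            heap[(j - 1) / 2] = tmp;
            j = (j - 1) / 2;
        }
    }

    /* Pop the largest index, then sift the displaced last element down. */
    static int
    heapRemove(int *heap, int len)
    {
        int     result = heap[0];
        int     val = heap[--len];
        int     i = 0,
                j;

        while ((j = 2 * i + 1) < len)
        {
            if (j + 1 < len && heap[j + 1] > heap[j])
                j++;
            if (val >= heap[j])
                break;
            heap[i] = heap[j];
            i = j;
        }
        heap[i] = val;
        return result;
    }

    int
    main(void)
    {
        int     before[NOBJ] = {0}; /* # of objects each one must precede */
        int     heap[NOBJ];
        int     ordering[NOBJ];
        int     heapLen = 0;
        int     i,
                k;

        for (i = 0; i < NOBJ; i++)
            for (k = 0; k < ndeps[i]; k++)
                before[deps[i][k]]++;

        /* Seed in decreasing index order: the heap invariant holds for free. */
        for (i = NOBJ; --i >= 0;)
            if (before[i] == 0)
                heap[heapLen++] = i;

        /* Fill the output from the back, always taking the highest ready index. */
        for (i = NOBJ; --i >= 0;)
        {
            int     obj = heapRemove(heap, heapLen--);

            ordering[i] = obj;
            for (k = 0; k < ndeps[obj]; k++)
                if (--before[deps[obj][k]] == 0)
                    heapAdd(deps[obj][k], heap, heapLen++);
        }

        for (i = 0; i < NOBJ; i++)
            printf("%d ", ordering[i]);
        printf("\n");               /* prints: 0 1 2 3 4 */
        return 0;
    }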
/*--------------------
- * Now emit objects, working backwards in the output list. At each step,
+ * Now emit objects, working backwards in the output list. At each step,
* we use the priority heap to select the last item that has no remaining
- * before-constraints. We remove that item from the heap, output it to
+ * before-constraints. We remove that item from the heap, output it to
* ordering[], and decrease the beforeConstraints count of each of the
* items it was constrained against. Whenever an item's beforeConstraints
* count is thereby decreased to zero, we insert it into the priority heap
/* Update beforeConstraints counts of its predecessors */
for (k = 0; k < obj->nDeps; k++)
{
- int id = obj->dependencies[k];
+ int id = obj->dependencies[k];
if ((--beforeConstraints[id]) == 0)
addHeapElement(idMap[id], pendingHeap, heapLength++);
* before trying TopoSort again. We can safely repair loops that are
* disjoint (have no members in common); if we find overlapping loops
* then we repair only the first one found, because the action taken to
- * repair the first might have repaired the other as well. (If not,
+ * repair the first might have repaired the other as well. (If not,
* we'll fix it on the next go-round.)
*
* objs[] lists the objects TopoSort couldn't sort
findDependencyLoops(DumpableObject **objs, int nObjs, int totObjs)
{
/*
- * We use a workspace array, the initial part of which stores
- * objects already processed, and the rest of which is used as
- * temporary space to try to build a loop in. This is convenient
- * because we do not care about loops involving already-processed
- * objects (see notes above); we can easily reject such loops in
- * findLoop() because of this representation. After we identify
- * and process a loop, we can add it to the initial part of the
- * workspace just by moving the boundary pointer.
+ * We use a workspace array, the initial part of which stores objects
+ * already processed, and the rest of which is used as temporary space
+ * to try to build a loop in. This is convenient because we do not
+ * care about loops involving already-processed objects (see notes
+ * above); we can easily reject such loops in findLoop() because of
+ * this representation. After we identify and process a loop, we can
+ * add it to the initial part of the workspace just by moving the
+ * boundary pointer.
*
- * When we determine that an object is not part of any interesting
- * loop, we also add it to the initial part of the workspace. This
- * is not necessary for correctness, but saves later invocations of
+ * When we determine that an object is not part of any interesting loop,
+ * we also add it to the initial part of the workspace. This is not
+ * necessary for correctness, but saves later invocations of
* findLoop() from uselessly chasing references to such an object.
*
- * We make the workspace large enough to hold all objects in the
- * original universe. This is probably overkill, but it's provably
- * enough space...
+ * We make the workspace large enough to hold all objects in the original
+ * universe. This is probably overkill, but it's provably enough
+ * space...
*/
- DumpableObject **workspace;
+ DumpableObject **workspace;
int initiallen;
bool fixedloop;
int i;
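
[Editor's note] The workspace trick described in the comment above is a depth-first search that records the current path in the tail of an array whose head holds already-settled objects; anything already present is skipped, which is what rejects loops through processed objects. A minimal recursive sketch of that search on a toy adjacency list; the graph and names are invented, and the real findLoop() additionally lets the caller reuse the tentatively-added slot, as the later comment in this hunk explains.

    #include <stdio.h>

    #define NOBJ 4

    /* Toy dependency graph, invented: 0 -> 1 -> 2 -> 0, and 3 -> 1. */
    static const int deps[NOBJ][NOBJ] = {{1}, {2}, {0}, {1}};
    static const int ndeps[NOBJ] = {1, 1, 1, 1};

    /*
     * Try to extend the path stored in workspace[depth..] from obj back to
     * startPoint.  Objects already in workspace[0..depth-1] are skipped, which
     * is what lets the caller park settled objects at the front of the array.
     */
    static int
    findLoopSketch(int obj, int startPoint, int *workspace, int depth, int *loopLen)
    {
        int     i,
                k;

        for (i = 0; i < depth; i++)
            if (workspace[i] == obj)
                return 0;           /* already on the path, or already settled */
        workspace[depth++] = obj;   /* tentatively add obj */

        for (k = 0; k < ndeps[obj]; k++)
            if (deps[obj][k] == startPoint)
            {
                *loopLen = depth;
                return 1;           /* closed the cycle */
            }
        for (k = 0; k < ndeps[obj]; k++)
            if (findLoopSketch(deps[obj][k], startPoint, workspace, depth, loopLen))
                return 1;
        return 0;
    }

    int
    main(void)
    {
        int     workspace[NOBJ];
        int     len = 0;
        int     i;

        if (findLoopSketch(0, 0, workspace, 0, &len))
        {
            printf("loop of length %d:", len);
            for (i = 0; i < len; i++)
                printf(" %d", workspace[i]);
            printf("\n");           /* loop of length 3: 0 1 2 */
        }
        return 0;
    }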
for (i = 0; i < nObjs; i++)
{
DumpableObject *obj = objs[i];
- int newlen;
+ int newlen;
- workspace[initiallen] = NULL; /* see test below */
+ workspace[initiallen] = NULL; /* see test below */
if (findLoop(obj, obj->dumpId, workspace, initiallen, &newlen))
{
else
{
/*
- * Didn't find a loop, but add this object to workspace anyway,
- * unless it's already present. We piggyback on the test that
- * findLoop() already did: it won't have tentatively added obj
- * to workspace if it's already present.
+ * Didn't find a loop, but add this object to workspace
+ * anyway, unless it's already present. We piggyback on the
+ * test that findLoop() already did: it won't have tentatively
+ * added obj to workspace if it's already present.
*/
if (workspace[initiallen] == obj)
initiallen++;
if (workspace[i] == obj)
return false;
}
+
/*
* Okay, tentatively add obj to workspace
*/
workspace[depth++] = obj;
+
/*
- * See if we've found a loop back to the desired startPoint; if so, done
+ * See if we've found a loop back to the desired startPoint; if so,
+ * done
*/
for (i = 0; i < obj->nDeps; i++)
{
return true;
}
}
+
/*
* Recurse down each outgoing branch
*/
if (inputFuncInfo == NULL)
return;
addObjectDependency(funcobj, inputFuncInfo->dobj.dumpId);
+
/*
* Make sure the input function's dependency on type gets removed too;
* if it hasn't been done yet, we'd end up with loops involving the
write_msg(modulename, "WARNING: could not resolve dependency loop among these items:\n");
for (i = 0; i < nLoop; i++)
{
- char buf[1024];
+ char buf[1024];
describeDumpableObject(loop[i], buf, sizeof(buf));
write_msg(modulename, " %s\n", buf);
* Portions Copyright (c) 1994, Regents of the University of California
*
*
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.50 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dumpall.c,v 1.51 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
const char *pguser, bool require_password);
static PGresult *executeQuery(PGconn *conn, const char *query);
-char pg_dump_bin[MAXPGPATH];
+char pg_dump_bin[MAXPGPATH];
PQExpBuffer pgdumpopts;
bool output_clean = false;
bool skip_acls = false;
int server_version;
/* flags for -X long options */
-int disable_dollar_quoting = 0;
-int disable_triggers = 0;
-int use_setsessauth = 0;
+int disable_dollar_quoting = 0;
+int disable_triggers = 0;
+int use_setsessauth = 0;
int
main(int argc, char *argv[])
bool globals_only = false;
bool schema_only = false;
PGconn *conn;
- int c, ret;
+ int c,
+ ret;
static struct option long_options[] = {
{"data-only", no_argument, NULL, 'a'},
{
if (ret == -1)
fprintf(stderr,
- _("The program \"pg_dump\" is needed by %s "
- "but was not found in the same directory as \"%s\".\n"
- "Check your installation.\n"),
- progname, progname);
+ _("The program \"pg_dump\" is needed by %s "
+ "but was not found in the same directory as \"%s\".\n"
+ "Check your installation.\n"),
+ progname, progname);
else
fprintf(stderr,
- _("The program \"pg_dump\" was found by %s "
- "but was not the same version as \"%s\".\n"
- "Check your installation.\n"),
- progname, progname);
+ _("The program \"pg_dump\" was found by %s "
+ "but was not the same version as \"%s\".\n"
+ "Check your installation.\n"),
+ progname, progname);
exit(1);
}
else if (strcmp(optarg, "disable-triggers") == 0)
appendPQExpBuffer(pgdumpopts, " -X disable-triggers");
else if (strcmp(optarg, "use-set-session-authorization") == 0)
- /* no-op, still allowed for compatibility */ ;
+ /* no-op, still allowed for compatibility */ ;
else
{
fprintf(stderr,
}
}
- /* Add long options to the pg_dump argument list */
+ /* Add long options to the pg_dump argument list */
if (disable_dollar_quoting)
appendPQExpBuffer(pgdumpopts, " -X disable-dollar-quoting");
if (disable_triggers)
appendPQExpBuffer(pgdumpopts, " -X disable-triggers");
if (use_setsessauth)
appendPQExpBuffer(pgdumpopts, " -X use-set-session-authorization");
-
+
if (optind < argc)
{
fprintf(stderr, _("%s: too many command-line arguments (first is \"%s\")\n"),
printf("--\n-- PostgreSQL database cluster dump\n--\n\n");
if (verbose)
- dumpTimestamp("Started on");
+ dumpTimestamp("Started on");
printf("\\connect \"template1\"\n\n");
printf(_(" -X disable-dollar-quoting, --disable-dollar-quoting\n"
" disable dollar quoting, use SQL standard quoting\n"));
printf(_(" -X disable-triggers, --disable-triggers\n"
- " disable triggers during data-only restore\n"));
+ " disable triggers during data-only restore\n"));
printf(_(" -X use-set-session-authorization, --use-set-session-authorization\n"
" use SESSION AUTHORIZATION commands instead of\n"
" OWNER TO commands\n"));
for (i = 0; i < PQntuples(res); i++)
{
const char *username;
- bool clusterowner;
+ bool clusterowner;
PQExpBuffer buf = createPQExpBuffer();
+
username = PQgetvalue(res, i, 0);
clusterowner = (strcmp(PQgetvalue(res, i, 6), "t") == 0);
/* Check which pass we're on */
- if ((initdbonly && !clusterowner) || (!initdbonly && clusterowner)) continue;
+ if ((initdbonly && !clusterowner) || (!initdbonly && clusterowner))
+ continue;
- /* Dump ALTER USER for the cluster owner and CREATE USER for all other users */
+ /*
+ * Dump ALTER USER for the cluster owner and CREATE USER for all
+ * other users
+ */
if (!clusterowner)
appendPQExpBuffer(buf, "CREATE USER %s WITH SYSID %s",
fmtId(username),
* pg_xxx)
*/
res = executeQuery(conn, "SELECT spcname, "
- "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
+ "pg_catalog.pg_get_userbyid(spcowner) AS spcowner, "
"spclocation, spcacl "
"FROM pg_catalog.pg_tablespace "
"WHERE spcname NOT LIKE 'pg\\_%'");
-
+
if (PQntuples(res) > 0)
printf("--\n-- Tablespaces\n--\n\n");
for (i = 0; i < PQntuples(res); i++)
{
PQExpBuffer buf = createPQExpBuffer();
- char *spcname = PQgetvalue(res, i, 0);
- char *spcowner = PQgetvalue(res, i, 1);
- char *spclocation = PQgetvalue(res, i, 2);
- char *spcacl = PQgetvalue(res, i, 3);
- char *fspcname;
+ char *spcname = PQgetvalue(res, i, 0);
+ char *spcowner = PQgetvalue(res, i, 1);
+ char *spclocation = PQgetvalue(res, i, 2);
+ char *spcacl = PQgetvalue(res, i, 3);
+ char *fspcname;
/* needed for buildACLCommands() */
fspcname = strdup(fmtId(spcname));
*pos = 0;
appendPQExpBuffer(buf, "ALTER %s %s ", type, fmtId(name));
appendPQExpBuffer(buf, "SET %s TO ", fmtId(mine));
- /* Some GUC variable names are 'LIST' type and hence must not be quoted. */
+
+ /*
+ * Some GUC variable names are 'LIST' type and hence must not be
+ * quoted.
+ */
if (strcasecmp(mine, "DateStyle") == 0
- || strcasecmp(mine, "search_path") == 0)
+ || strcasecmp(mine, "search_path") == 0)
appendPQExpBuffer(buf, "%s", pos + 1);
- else
+ else
appendStringLiteral(buf, pos + 1, false);
appendPQExpBuffer(buf, ";\n");
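
[Editor's note] The quoting branch above exists because list-valued settings would be collapsed into a single string literal if they were quoted, losing the comma-separated elements. A small illustration of the two shapes of ALTER ... SET this code ends up emitting; the role name and values are invented for the example.

    #include <stdio.h>

    int
    main(void)
    {
        /* DateStyle and search_path are the list cases handled above: verbatim */
        printf("ALTER USER alice SET DateStyle TO ISO, MDY;\n");
        printf("ALTER USER alice SET search_path TO public, util;\n");
        /* everything else goes through appendStringLiteral() quoting */
        printf("ALTER USER alice SET default_transaction_isolation TO 'read committed';\n");
        return 0;
    }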
int ret;
/*
- * Win32 has to use double-quotes for args, rather than single quotes.
- * Strangely enough, this is the only place we pass a database name
- * on the command line, except template1 that doesn't need quoting.
- */
+ * Win32 has to use double-quotes for args, rather than single quotes.
+ * Strangely enough, this is the only place we pass a database name on
+ * the command line, except template1 that doesn't need quoting.
+ */
#ifndef WIN32
appendPQExpBuffer(cmd, "%s\"%s\" %s -Fp '", SYSTEMQUOTE, pg_dump_bin,
#else
#else
appendPQExpBufferChar(cmd, '"');
#endif
-
+
appendPQExpBuffer(cmd, "%s", SYSTEMQUOTE);
if (verbose)
static void
dumpTimestamp(char *msg)
{
- char buf[256];
- time_t now = time(NULL);
+ char buf[256];
+ time_t now = time(NULL);
if (strftime(buf, 256, "%Y-%m-%d %H:%M:%S %Z", localtime(&now)) != 0)
printf("-- %s %s\n\n", msg, buf);
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.60 2004/08/20 04:20:23 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_restore.c,v 1.61 2004/08/29 05:06:53 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AH->verbose = opts->verbose;
/*
- * Whether to keep submitting sql commands as "pg_restore ... | psql ... "
+ * Whether to keep submitting sql commands as "pg_restore ... | psql
+ * ... "
*/
AH->exit_on_error = opts->exit_on_error;
/* done, print a summary of ignored errors */
if (AH->n_errors)
- fprintf(stderr, _("WARNING, errors ignored on restore: %d\n"),
+ fprintf(stderr, _("WARNING, errors ignored on restore: %d\n"),
AH->n_errors);
/* AH may be freed in CloseArchive? */
- exit_code = AH->n_errors? 1: 0;
+ exit_code = AH->n_errors ? 1 : 0;
CloseArchive(AH);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.22 2004/08/29 04:13:01 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pg_resetxlog/pg_resetxlog.c,v 1.23 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define _(x) gettext((x))
-char XLogDir[MAXPGPATH]; /* not static, see xlog_internal.h */
+char XLogDir[MAXPGPATH]; /* not static, see xlog_internal.h */
static char ControlFilePath[MAXPGPATH];
static ControlFileData ControlFile; /* pg_control values */
printf(_("pg_control values:\n\n"));
/*
- * Format system_identifier separately to keep platform-dependent format
- * code out of the translatable message string.
+ * Format system_identifier separately to keep platform-dependent
+ * format code out of the translatable message string.
*/
snprintf(sysident_str, sizeof(sysident_str), UINT64_FORMAT,
ControlFile.system_identifier);
/* adjust in case we are changing segment size */
newXlogSeg *= ControlFile.xlog_seg_size;
- newXlogSeg = (newXlogSeg + XLogSegSize-1) / XLogSegSize;
+ newXlogSeg = (newXlogSeg + XLogSegSize - 1) / XLogSegSize;
/* be sure we wrap around correctly at end of a logfile */
NextLogSeg(newXlogId, newXlogSeg);
errno = 0;
}
#ifdef WIN32
- /* This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
- not in released version */
+
+ /*
+ * This fix is in mingw cvs (runtime/mingwex/dirent.c rev 1.4), but
+ * not in released version
+ */
if (GetLastError() == ERROR_NO_MORE_FILES)
errno = 0;
#endif
*
* pgevent.c
* Defines the entry point for pgevent dll.
- * The DLL defines event source for backend
+ * The DLL defines event source for backend
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/bin/pgevent/pgevent.c,v 1.1 2004/06/20 01:32:49 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/pgevent/pgevent.c,v 1.2 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "string.h"
/* Global variables */
-HANDLE g_module = NULL; /* hModule of DLL */
+HANDLE g_module = NULL; /* hModule of DLL */
/* Prototypes */
-STDAPI DllRegisterServer(void) ;
-STDAPI DllUnregisterServer(void);
-BOOL WINAPI DllMain( HANDLE hModule, DWORD ul_reason_for_call, LPVOID lpReserved );
+STDAPI
+DllRegisterServer(void);
+STDAPI DllUnregisterServer(void);
+BOOL WINAPI DllMain(HANDLE hModule, DWORD ul_reason_for_call, LPVOID lpReserved);
/*
- * DllRegisterServer --- Instructs DLL to create its registry entries
+ * DllRegisterServer --- Instructs DLL to create its registry entries
*/
-STDAPI DllRegisterServer(void)
+STDAPI
+DllRegisterServer(void)
{
- HKEY key;
- DWORD data;
- char buffer[_MAX_PATH];
+ HKEY key;
+ DWORD data;
+ char buffer[_MAX_PATH];
- /* Set the name of DLL full path name. */
- if (!GetModuleFileName((HMODULE)g_module, buffer, sizeof(buffer)))
+ /* Set the name of DLL full path name. */
+ if (!GetModuleFileName((HMODULE) g_module, buffer, sizeof(buffer)))
{
- MessageBox(NULL, "Could not retrieve DLL filename", "PostgreSQL error", MB_OK|MB_ICONSTOP);
- return SELFREG_E_TYPELIB;
+ MessageBox(NULL, "Could not retrieve DLL filename", "PostgreSQL error", MB_OK | MB_ICONSTOP);
+ return SELFREG_E_TYPELIB;
}
- /* Add PostgreSQL source name as a subkey under the Application
- key in the EventLog registry key. */
- if ( RegCreateKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL", &key) )
+ /*
+ * Add PostgreSQL source name as a subkey under the Application key in
+ * the EventLog registry key.
+ */
+ if (RegCreateKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL", &key))
{
- MessageBox(NULL, "Could not create the registry key.", "PostgreSQL error", MB_OK|MB_ICONSTOP);
- return SELFREG_E_TYPELIB;
+ MessageBox(NULL, "Could not create the registry key.", "PostgreSQL error", MB_OK | MB_ICONSTOP);
+ return SELFREG_E_TYPELIB;
}
- /* Add the name to the EventMessageFile subkey. */
- if (RegSetValueEx(key,
- "EventMessageFile",
- 0,
- REG_EXPAND_SZ,
- (LPBYTE) buffer,
- strlen(buffer) + 1))
+ /* Add the name to the EventMessageFile subkey. */
+ if (RegSetValueEx(key,
+ "EventMessageFile",
+ 0,
+ REG_EXPAND_SZ,
+ (LPBYTE) buffer,
+ strlen(buffer) + 1))
{
- MessageBox(NULL, "Could not set the event message file.", "PostgreSQL error", MB_OK|MB_ICONSTOP);
- return SELFREG_E_TYPELIB;
+ MessageBox(NULL, "Could not set the event message file.", "PostgreSQL error", MB_OK | MB_ICONSTOP);
+ return SELFREG_E_TYPELIB;
}
-
- /* Set the supported event types in the TypesSupported subkey. */
- data = EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | EVENTLOG_INFORMATION_TYPE;
-
- if (RegSetValueEx(key,
- "TypesSupported",
- 0,
- REG_DWORD,
- (LPBYTE) &data,
- sizeof(DWORD)))
+
+ /* Set the supported event types in the TypesSupported subkey. */
+ data = EVENTLOG_ERROR_TYPE | EVENTLOG_WARNING_TYPE | EVENTLOG_INFORMATION_TYPE;
+
+ if (RegSetValueEx(key,
+ "TypesSupported",
+ 0,
+ REG_DWORD,
+					  (LPBYTE) &data,
+ sizeof(DWORD)))
{
- MessageBox(NULL, "Could not set the supported types.", "PostgreSQL error", MB_OK|MB_ICONSTOP);
- return SELFREG_E_TYPELIB;
+ MessageBox(NULL, "Could not set the supported types.", "PostgreSQL error", MB_OK | MB_ICONSTOP);
+ return SELFREG_E_TYPELIB;
}
-
- RegCloseKey(key);
+
+ RegCloseKey(key);
return S_OK;
}
* DllUnregisterServer --- Instructs DLL to remove only those entries created through DllRegisterServer
*/
-STDAPI DllUnregisterServer(void)
+STDAPI
+DllUnregisterServer(void)
{
- /* Remove PostgreSQL source name as a subkey under the Application
- key in the EventLog registry key. */
-
- if ( RegDeleteKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL") )
+ /*
+ * Remove PostgreSQL source name as a subkey under the Application key
+ * in the EventLog registry key.
+ */
+
+ if (RegDeleteKey(HKEY_LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\PostgreSQL"))
{
- MessageBox(NULL, "Could not delete the registry key.", "PostgreSQL error", MB_OK|MB_ICONSTOP);
- return SELFREG_E_TYPELIB;
+ MessageBox(NULL, "Could not delete the registry key.", "PostgreSQL error", MB_OK | MB_ICONSTOP);
+ return SELFREG_E_TYPELIB;
}
return S_OK;
}
* DllMain --- is an optional entry point into a DLL.
*/
-BOOL WINAPI DllMain( HANDLE hModule,
- DWORD ul_reason_for_call,
- LPVOID lpReserved
- )
+BOOL WINAPI
+DllMain(HANDLE hModule,
+ DWORD ul_reason_for_call,
+ LPVOID lpReserved
+)
{
- if ( ul_reason_for_call == DLL_PROCESS_ATTACH )
- {
+ if (ul_reason_for_call == DLL_PROCESS_ATTACH)
g_module = hModule;
- }
- return TRUE;
+ return TRUE;
}
-
-//
-// Values are 32 bit values layed out as follows:
-//
-// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
-// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
-// +---+-+-+-----------------------+-------------------------------+
-// |Sev|C|R| Facility | Code |
-// +---+-+-+-----------------------+-------------------------------+
-//
-// where
-//
-// Sev - is the severity code
-//
-// 00 - Success
-// 01 - Informational
-// 10 - Warning
-// 11 - Error
-//
-// C - is the Customer code flag
-//
-// R - is a reserved bit
-//
-// Facility - is the facility code
-//
-// Code - is the facility's status code
-//
-//
-// Define the facility codes
-//
+/* */
+/* Values are 32 bit values layed out as follows: */
+/* */
+/* 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 */
+/* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 */
+/* +---+-+-+-----------------------+-------------------------------+ */
+/* |Sev|C|R| Facility | Code | */
+/* +---+-+-+-----------------------+-------------------------------+ */
+/* */
+/* where */
+/* */
+/* Sev - is the severity code */
+/* */
+/* 00 - Success */
+/* 01 - Informational */
+/* 10 - Warning */
+/* 11 - Error */
+/* */
+/* C - is the Customer code flag */
+/* */
+/* R - is a reserved bit */
+/* */
+/* Facility - is the facility code */
+/* */
+/* Code - is the facility's status code */
+/* */
+/* */
+/* Define the facility codes */
+/* */
-//
-// Define the severity codes
-//
+/* */
+/* Define the severity codes */
+/* */
-//
-// MessageId: PGWIN32_EVENTLOG_MSG
-//
-// MessageText:
-//
-// %1
-//
-#define PGWIN32_EVENTLOG_MSG 0x00000000L
-
+/* */
+/* MessageId: PGWIN32_EVENTLOG_MSG */
+/* */
+/* MessageText: */
+/* */
+/* %1 */
+/* */
+#define PGWIN32_EVENTLOG_MSG 0x00000000L
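For context on what the registration above enables, here is a small sketch of writing an event through the registered source. Only the source name "PostgreSQL" and PGWIN32_EVENTLOG_MSG come from the code above; the message text and program structure are assumed for illustration (regsvr32 is the usual driver of DllRegisterServer, and this example must be linked against advapi32):

#include <windows.h>

#define PGWIN32_EVENTLOG_MSG 0x00000000L

/*
 * Illustrative sketch only: once the "PostgreSQL" source has been
 * registered (DllRegisterServer above creates the EventLog registry
 * entries), an event can be reported through it.  The single string is
 * substituted for the %1 placeholder of PGWIN32_EVENTLOG_MSG.
 */
int
main(void)
{
	HANDLE		h;
	const char *strings[1] = {"hello from a hypothetical pgevent example"};

	h = RegisterEventSourceA(NULL, "PostgreSQL");
	if (h == NULL)
		return 1;
	ReportEventA(h, EVENTLOG_INFORMATION_TYPE, 0, PGWIN32_EVENTLOG_MSG,
				 NULL, 1, 0, strings, NULL);
	DeregisterEventSource(h);
	return 0;
}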
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.124 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.c,v 1.125 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "command.h"
/* functions for use in this file */
static backslashResult exec_command(const char *cmd,
- PsqlScanState scan_state,
- PQExpBuffer query_buf);
+ PsqlScanState scan_state,
+ PQExpBuffer query_buf);
static bool do_edit(const char *filename_arg, PQExpBuffer query_buf);
static bool do_connect(const char *new_dbname, const char *new_user);
static bool do_shell(const char *command);
* one-letter command with immediately following argument (a
* still-supported, but no longer encouraged, syntax).
*/
- char new_cmd[2];
+ char new_cmd[2];
/* don't change cmd until we know it's okay */
new_cmd[0] = cmd[0];
else if (pg_strcasecmp(cmd, "copy") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
success = do_copy(opt);
free(opt);
fout = stdout;
while ((value = psql_scan_slash_option(scan_state,
- OT_NORMAL, "ed, false)))
+ OT_NORMAL, "ed, false)))
{
if (!quoted && strcmp(value, "-n") == 0)
no_newline = true;
else if (strcmp(cmd, "encoding") == 0)
{
char *encoding = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
if (!encoding)
{
else if (strcmp(cmd, "f") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
success = do_pset("fieldsep", fname, &pset.popt, quiet);
free(fname);
else if (strcmp(cmd, "g") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_FILEPIPE, NULL, false);
+ OT_FILEPIPE, NULL, false);
if (!fname)
pset.gfname = NULL;
else if (strcmp(cmd, "h") == 0 || strcmp(cmd, "help") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
helpSQL(opt, pset.popt.topt.pager);
free(opt);
else if (strcmp(cmd, "o") == 0 || strcmp(cmd, "out") == 0)
{
char *fname = psql_scan_slash_option(scan_state,
- OT_FILEPIPE, NULL, true);
+ OT_FILEPIPE, NULL, true);
expand_tilde(&fname);
success = setQFout(fname);
else if (strcmp(cmd, "T") == 0)
{
char *value = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, false);
+ OT_NORMAL, NULL, false);
success = do_pset("tableattr", value, &pset.popt, quiet);
free(value);
else if (strcmp(cmd, "z") == 0)
{
char *pattern = psql_scan_slash_option(scan_state,
- OT_NORMAL, NULL, true);
+ OT_NORMAL, NULL, true);
success = permissionsList(pattern);
if (pattern)
else if (strcmp(cmd, "!") == 0)
{
char *opt = psql_scan_slash_option(scan_state,
- OT_WHOLE_LINE, NULL, false);
+ OT_WHOLE_LINE, NULL, false);
success = do_shell(opt);
free(opt);
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/command.h,v 1.20 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/command.h,v 1.21 2004/08/29 05:06:54 momjian Exp $
*/
#ifndef COMMAND_H
#define COMMAND_H
extern backslashResult HandleSlashCmds(PsqlScanState scan_state,
- PQExpBuffer query_buf);
+ PQExpBuffer query_buf);
extern int process_file(char *filename);
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.89 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/common.c,v 1.90 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
#define DIFF_MSEC(T, U) \
(((T)->time - (U)->time) * 1000.0 + \
((T)->millitm - (U)->millitm))
-
#endif
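A small usage sketch for the DIFF_MSEC macro shown above, repeated here so the example compiles on its own; the timed loop is an arbitrary stand-in for real work:

#include <stdio.h>
#include <sys/timeb.h>

#define DIFF_MSEC(T, U) \
	(((T)->time - (U)->time) * 1000.0 + \
	 ((T)->millitm - (U)->millitm))

/*
 * Take two struct timeb samples around the work being timed and report
 * the difference in milliseconds.  Sketch only.
 */
int
main(void)
{
	struct timeb before,
				after;
	volatile long i,
				s = 0;

	ftime(&before);
	for (i = 0; i < 10000000; i++)
		s += i;
	ftime(&after);
	printf("elapsed: %.3f ms (sum %ld)\n", DIFF_MSEC(&after, &before), (long) s);
	return 0;
}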
extern bool prompt_state;
int loc = 0;
const char *sp;
- int clen, slen, i, *qidx, *scridx, qoffset, scroffset, ibeg, iend,
- loc_line;
- char *wquery;
- bool beg_trunc, end_trunc;
+ int clen,
+ slen,
+ i,
+ *qidx,
+ *scridx,
+ qoffset,
+ scroffset,
+ ibeg,
+ iend,
+ loc_line;
+ char *wquery;
+ bool beg_trunc,
+ end_trunc;
PQExpBufferData msg;
if (pset.verbosity == PQERRORS_TERSE)
psql_assert(clen < slen);
/* convert loc to zero-based offset in qidx/scridx arrays */
- loc--;
+ loc--;
/* do we have something to show? */
if (loc >= 0 && loc <= clen)
{
- /* input line number of our syntax error. */
+ /* input line number of our syntax error. */
loc_line = 1;
/* first included char of extract. */
- ibeg = 0;
+ ibeg = 0;
/* last-plus-1 included char of extract. */
- iend = clen;
+ iend = clen;
/*
* Replace tabs with spaces in the writable copy. (Later we might
* but not today.)
*
* Extract line number and begin and end indexes of line containing
- * error location. There will not be any newlines or carriage
+ * error location. There will not be any newlines or carriage
* returns in the selected extract.
*/
- for (i=0; i<clen; i++)
+ for (i = 0; i < clen; i++)
{
/* character length must be 1 or it's not ASCII */
- if ((qidx[i+1]-qidx[i]) == 1)
+ if ((qidx[i + 1] - qidx[i]) == 1)
{
- if (wquery[qidx[i]] == '\t')
+ if (wquery[qidx[i]] == '\t')
wquery[qidx[i]] = ' ';
else if (wquery[qidx[i]] == '\r' || wquery[qidx[i]] == '\n')
{
if (i < loc)
{
/*
- * count lines before loc. Each \r or \n counts
+ * count lines before loc. Each \r or \n counts
* as a line except when \r \n appear together.
*/
if (wquery[qidx[i]] == '\r' ||
i == 0 ||
- (qidx[i]-qidx[i-1]) != 1 ||
- wquery[qidx[i-1]] != '\r')
+ (qidx[i] - qidx[i - 1]) != 1 ||
+ wquery[qidx[i - 1]] != '\r')
loc_line++;
/* extract beginning = last line start before loc. */
- ibeg = i+1;
+ ibeg = i + 1;
}
else
{
/* If the line extracted is too long, we truncate it. */
beg_trunc = false;
end_trunc = false;
- if (scridx[iend]-scridx[ibeg] > DISPLAY_SIZE)
+ if (scridx[iend] - scridx[ibeg] > DISPLAY_SIZE)
{
/*
* We first truncate right if it is enough. This code might
* be off a space or so on enforcing MIN_RIGHT_CUT if there's
* a wide character right there, but that should be okay.
*/
- if (scridx[ibeg]+DISPLAY_SIZE >= scridx[loc]+MIN_RIGHT_CUT)
+ if (scridx[ibeg] + DISPLAY_SIZE >= scridx[loc] + MIN_RIGHT_CUT)
{
- while (scridx[iend]-scridx[ibeg] > DISPLAY_SIZE)
+ while (scridx[iend] - scridx[ibeg] > DISPLAY_SIZE)
iend--;
end_trunc = true;
}
else
{
/* Truncate right if not too close to loc. */
- while (scridx[loc]+MIN_RIGHT_CUT < scridx[iend])
+ while (scridx[loc] + MIN_RIGHT_CUT < scridx[iend])
{
iend--;
end_trunc = true;
}
/* Truncate left if still too long. */
- while (scridx[iend]-scridx[ibeg] > DISPLAY_SIZE)
+ while (scridx[iend] - scridx[ibeg] > DISPLAY_SIZE)
{
ibeg++;
beg_trunc = true;
}
/* the extract MUST contain the target position! */
- psql_assert(ibeg<=loc && loc<=iend);
+ psql_assert(ibeg <= loc && loc <= iend);
/* truncate working copy at desired endpoint */
wquery[qidx[iend]] = '\0';
*/
scroffset = 0;
for (i = 0; i < msg.len; i += PQmblen(&msg.data[i], pset.encoding))
- {
scroffset += PQdsplen(&msg.data[i], pset.encoding);
- }
/* Finish and emit the message. */
appendPQExpBufferStr(&msg, &wquery[qidx[ibeg]]);
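The comments above describe how the reported error offset is turned into a line number and a line extract. Below is a minimal standalone sketch of that counting rule, simplified to single-byte ASCII input; the helper name and the sample query are invented for illustration and are not the psql code itself:

#include <stdio.h>

/*
 * Given a zero-based error offset "loc" in an ASCII query string, report
 * the 1-based line number and the offset where that line starts.  Each
 * \r or \n begins a new line, except that the \n of a \r\n pair is not
 * counted a second time -- the same rule as in the code above.
 */
static void
locate_error_line(const char *query, int loc, int *line_no, int *line_start)
{
	int			i;

	*line_no = 1;
	*line_start = 0;
	for (i = 0; i < loc && query[i] != '\0'; i++)
	{
		if (query[i] == '\r' || query[i] == '\n')
		{
			if (query[i] == '\r' || i == 0 || query[i - 1] != '\r')
				(*line_no)++;
			*line_start = i + 1;
		}
	}
}

int
main(void)
{
	const char *q = "SELECT 1;\r\nSELEC 2;";
	int			line,
				start;

	locate_error_line(q, 13, &line, &start);	/* offset inside the second statement */
	printf("error on line %d, line starts at offset %d\n", line, start);
	return 0;
}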
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.53 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/copy.c,v 1.54 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "copy.h"
char *null;
char *quote;
char *escape;
- char *force_quote_list;
- char *force_notnull_list;
+ char *force_quote_list;
+ char *force_notnull_list;
};
result->file = NULL;
}
else if (pg_strcasecmp(token, "pstdin") == 0 ||
- pg_strcasecmp(token, "pstdout") == 0)
+ pg_strcasecmp(token, "pstdout") == 0)
{
result->psql_inout = true;
result->file = NULL;
if (token)
{
/*
- * WITH is optional. Also, the backend will allow WITH followed by
- * nothing, so we do too.
+ * WITH is optional. Also, the backend will allow WITH followed
+ * by nothing, so we do too.
*/
if (pg_strcasecmp(token, "with") == 0)
token = strtokx(NULL, whitespace, NULL, NULL,
while (token)
{
- bool fetch_next;
+ bool fetch_next;
fetch_next = true;
-
+
/* someday allow BINARY here */
if (pg_strcasecmp(token, "oids") == 0)
- {
result->oids = true;
- }
else if (pg_strcasecmp(token, "csv") == 0)
- {
result->csv_mode = true;
- }
else if (pg_strcasecmp(token, "delimiter") == 0)
{
token = strtokx(NULL, whitespace, NULL, "'",
PGresult *result;
bool success;
struct stat st;
-
+
/* parse options */
options = parse_slash_copy(args);
}
if (options->csv_mode)
- {
appendPQExpBuffer(&query, " CSV");
- }
-
+
if (options->quote)
{
if (options->quote[0] == '\'')
}
if (options->force_quote_list)
- {
appendPQExpBuffer(&query, " FORCE QUOTE %s", options->force_quote_list);
- }
if (options->force_notnull_list)
- {
appendPQExpBuffer(&query, " FORCE NOT NULL %s", options->force_notnull_list);
- }
if (options->file)
canonicalize_path(options->file);
if (options->file)
copystream = fopen(options->file, PG_BINARY_R);
else if (!options->psql_inout)
- copystream = pset.cur_cmd_source;
+ copystream = pset.cur_cmd_source;
else
- copystream = stdin;
+ copystream = stdin;
}
else
{
if (options->file)
copystream = fopen(options->file, "w");
else if (!options->psql_inout)
- copystream = pset.queryFout;
+ copystream = pset.queryFout;
else
copystream = stdout;
}
PQclear(result);
- if (options->file != NULL)
+ if (options->file != NULL)
{
if (fclose(copystream) != 0)
{
{
if (!QUIET())
puts(gettext("Enter data to be copied followed by a newline.\n"
- "End with a backslash and a period on a line by itself."));
+ "End with a backslash and a period on a line by itself."));
prompt = get_prompt(PROMPT_COPY);
}
else
- {
prompt = NULL;
- }
while (!copydone)
{ /* for each input line ... */
if (c == EOF && s == copybuf && firstload)
{
/*
- * We are guessing a little bit as to the right line-ending
- * here...
+ * We are guessing a little bit as to the right
+ * line-ending here...
*/
if (saw_cr)
PQputline(conn, "\\.\r\n");
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.105 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/describe.c,v 1.106 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "describe.h"
const char *schemavar, const char *namevar,
const char *altnamevar, const char *visibilityrule);
-static void add_tablespace_footer(char relkind, Oid tablespace,
- char **footers, int *count, PQExpBufferData buf);
+static void add_tablespace_footer(char relkind, Oid tablespace,
+ char **footers, int *count, PQExpBufferData buf);
/*----------------
* Handlers for various slash commands displaying some sort of list
PGresult *res;
printQueryOpt myopt = pset.popt;
- if (pset.sversion < 70500) {
- fprintf(stderr, _("This server version (%d) does not support tablespaces.\n"),
- pset.sversion);
- return true;
+ if (pset.sversion < 70500)
+ {
+ fprintf(stderr, _("This server version (%d) does not support tablespaces.\n"),
+ pset.sversion);
+ return true;
}
initPQExpBuffer(&buf);
printfPQExpBuffer(&buf,
"SELECT spcname AS \"%s\",\n"
- " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
+ " pg_catalog.pg_get_userbyid(spcowner) AS \"%s\",\n"
" spclocation AS \"%s\"",
_("Name"), _("Owner"), _("Location"));
if (verbose)
appendPQExpBuffer(&buf,
- ",\n spcacl as \"%s\"",
- _("Access privileges"));
-
+ ",\n spcacl as \"%s\"",
+ _("Access privileges"));
+
appendPQExpBuffer(&buf,
"\nFROM pg_catalog.pg_tablespace\n");
char relkind;
bool hasindex;
bool hasrules;
- bool hasoids;
+ bool hasoids;
Oid tablespace;
} tableinfo;
bool show_modifiers = false;
/* Get general table info */
printfPQExpBuffer(&buf,
- "SELECT relhasindex, relkind, relchecks, reltriggers, relhasrules, \n"
- "relhasoids %s \n"
+ "SELECT relhasindex, relkind, relchecks, reltriggers, relhasrules, \n"
+ "relhasoids %s \n"
"FROM pg_catalog.pg_class WHERE oid = '%s'",
pset.sversion >= 70500 ? ", reltablespace" : "",
oid);
tableinfo.hasindex = strcmp(PQgetvalue(res, 0, 0), "t") == 0;
tableinfo.hasrules = strcmp(PQgetvalue(res, 0, 4), "t") == 0;
tableinfo.hasoids = strcmp(PQgetvalue(res, 0, 5), "t") == 0;
- tableinfo.tablespace = (pset.sversion >= 70500) ?
- atooid(PQgetvalue(res, 0, 6)) : 0;
+ tableinfo.tablespace = (pset.sversion >= 70500) ?
+ atooid(PQgetvalue(res, 0, 6)) : 0;
PQclear(res);
headers[0] = _("Column");
PGresult *result;
printfPQExpBuffer(&buf,
- "SELECT i.indisunique, i.indisprimary, i.indisclustered, a.amname, c2.relname,\n"
- " pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
+ "SELECT i.indisunique, i.indisprimary, i.indisclustered, a.amname, c2.relname,\n"
+ " pg_catalog.pg_get_expr(i.indpred, i.indrelid, true)\n"
"FROM pg_catalog.pg_index i, pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_am a\n"
"WHERE i.indexrelid = c.oid AND c.oid = '%s' AND c.relam = a.oid\n"
"AND i.indrelid = c2.oid",
footers = pg_malloc_zero(4 * sizeof(*footers));
footers[count_footers++] = pg_strdup(tmpbuf.data);
add_tablespace_footer(tableinfo.relkind, tableinfo.tablespace,
- footers, &count_footers, tmpbuf);
+ footers, &count_footers, tmpbuf);
footers[count_footers] = NULL;
}
if (tableinfo.hasindex)
{
printfPQExpBuffer(&buf,
- "SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, "
- "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true)\n"
+ "SELECT c2.relname, i.indisprimary, i.indisunique, i.indisclustered, "
+ "pg_catalog.pg_get_indexdef(i.indexrelid, 0, true)\n"
"FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i\n"
"WHERE c.oid = '%s' AND c.oid = i.indrelid AND i.indexrelid = c2.oid\n"
"ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname",
{
printfPQExpBuffer(&buf,
"SELECT "
- "pg_catalog.pg_get_constraintdef(r.oid, true), "
+ "pg_catalog.pg_get_constraintdef(r.oid, true), "
"conname\n"
"FROM pg_catalog.pg_constraint r\n"
- "WHERE r.conrelid = '%s' AND r.contype = 'c'",
+ "WHERE r.conrelid = '%s' AND r.contype = 'c'",
oid);
result2 = PSQLexec(buf.data, false);
if (!result2)
{
printfPQExpBuffer(&buf,
"SELECT conname,\n"
- " pg_catalog.pg_get_constraintdef(oid, true) as condef\n"
+ " pg_catalog.pg_get_constraintdef(oid, true) as condef\n"
"FROM pg_catalog.pg_constraint r\n"
"WHERE r.conrelid = '%s' AND r.contype = 'f'",
oid);
if (verbose)
{
- char *s = _("Contains OIDs");
+ char *s = _("Contains OIDs");
+
printfPQExpBuffer(&buf, "%s: %s", s,
(tableinfo.hasoids ? _("yes") : _("no")));
footers[count_footers++] = pg_strdup(buf.data);
}
add_tablespace_footer(tableinfo.relkind, tableinfo.tablespace,
- footers, &count_footers, buf);
+ footers, &count_footers, buf);
/* end of list marker */
footers[count_footers] = NULL;
static void
-add_tablespace_footer(char relkind, Oid tablespace, char **footers,
- int *count, PQExpBufferData buf)
+add_tablespace_footer(char relkind, Oid tablespace, char **footers,
+ int *count, PQExpBufferData buf)
{
/* relkinds for which we support tablespaces */
- if(relkind == 'r' || relkind == 'i')
+ if (relkind == 'r' || relkind == 'i')
{
/*
* We ignore the database default tablespace so that users not
* using tablespaces don't need to know about them.
*/
- if(tablespace != 0)
+ if (tablespace != 0)
{
PGresult *result1 = NULL;
+
printfPQExpBuffer(&buf, "SELECT spcname FROM pg_tablespace \n"
- "WHERE oid = '%u';", tablespace);
+ "WHERE oid = '%u';", tablespace);
result1 = PSQLexec(buf.data, false);
- if (!result1)
+ if (!result1)
return;
/* Should always be the case, but.... */
- if(PQntuples(result1) > 0)
+ if (PQntuples(result1) > 0)
{
printfPQExpBuffer(&buf, _("Tablespace: \"%s\""),
- PQgetvalue(result1, 0, 0));
+ PQgetvalue(result1, 0, 0));
footers[(*count)++] = pg_strdup(buf.data);
}
PQclear(result1);
initPQExpBuffer(&buf);
printfPQExpBuffer(&buf,
- "SELECT n.nspname AS \"%s\",\n"
- " u.usename AS \"%s\"",
- _("Name"), _("Owner"));
-
+ "SELECT n.nspname AS \"%s\",\n"
+ " u.usename AS \"%s\"",
+ _("Name"), _("Owner"));
+
if (verbose)
appendPQExpBuffer(&buf,
- ",\n n.nspacl as \"%s\","
- " pg_catalog.obj_description(n.oid, 'pg_namespace') as \"%s\"",
- _("Access privileges"), _("Description"));
-
+ ",\n n.nspacl as \"%s\","
+ " pg_catalog.obj_description(n.oid, 'pg_namespace') as \"%s\"",
+ _("Access privileges"), _("Description"));
+
appendPQExpBuffer(&buf,
- "\nFROM pg_catalog.pg_namespace n LEFT JOIN pg_catalog.pg_user u\n"
- " ON n.nspowner=u.usesysid\n"
- "WHERE (n.nspname NOT LIKE 'pg\\\\_temp\\\\_%%' OR\n"
- " n.nspname = (pg_catalog.current_schemas(true))[1])\n"); /* temp schema is first */
+ "\nFROM pg_catalog.pg_namespace n LEFT JOIN pg_catalog.pg_user u\n"
+ " ON n.nspowner=u.usesysid\n"
+ "WHERE (n.nspname NOT LIKE 'pg\\\\_temp\\\\_%%' OR\n"
+ " n.nspname = (pg_catalog.current_schemas(true))[1])\n"); /* temp schema is first */
processNamePattern(&buf, pattern, true, false,
NULL, "n.nspname", NULL,
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.92 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/help.c,v 1.93 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
fprintf(output, _(" \\pset NAME [VALUE]\n"
" set table output option\n"
" (NAME := {format|border|expanded|fieldsep|footer|null|\n"
- " recordsep|tuples_only|title|tableattr|pager})\n"));
+ " recordsep|tuples_only|title|tableattr|pager})\n"));
fprintf(output, _(" \\t show only rows (currently %s)\n"),
ON(pset.popt.topt.tuples_only));
fprintf(output, _(" \\T [STRING] set HTML
tag attributes, or unset if none\n"));
fprintf(output, _(" \\lo_export LOBOID FILE\n"
" \\lo_import FILE [COMMENT] \n"
" \\lo_list\n"
- " \\lo_unlink LOBOID large object operations\n"));
+ " \\lo_unlink LOBOID large object operations\n"));
if (output != stdout)
{
FILE *output;
size_t len;
int nl_count = 0;
- char *ch;
+ char *ch;
/* don't care about trailing spaces or semicolons */
len = strlen(topic);
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.36 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/input.c,v 1.37 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "input.h"
#ifdef USE_READLINE
if (flags & 1)
{
- char home[MAXPGPATH];
+ char home[MAXPGPATH];
useReadline = true;
initialize_readline();
using_history();
if (get_home_path(home))
{
- char *psql_history;
+ char *psql_history;
psql_history = pg_malloc(strlen(home) + 1 +
strlen(PSQLHISTORY) + 1);
#ifdef USE_READLINE
if (useHistory)
{
- char home[MAXPGPATH];
+ char home[MAXPGPATH];
if (get_home_path(home))
{
- char *psql_history;
- int hist_size;
+ char *psql_history;
+ int hist_size;
psql_history = pg_malloc(strlen(home) + 1 +
strlen(PSQLHISTORY) + 1);
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.63 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/mainloop.c,v 1.64 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "mainloop.h"
volatile promptStatus_t prompt_status = PROMPT_READY;
volatile int count_eof = 0;
volatile bool die_on_error = false;
+
/* Save the prior command source */
FILE *prev_cmd_source;
bool prev_cmd_interactive;
prompt_status = PROMPT_READY;
if (pset.cur_cmd_interactive)
- {
putc('\n', stdout);
- }
else
{
successResult = EXIT_USER;
/* handle backslash command */
/*
- * If we added a newline to query_buf, and nothing else has
- * been inserted in query_buf by the lexer, then strip off
- * the newline again. This avoids any change to query_buf
- * when a line contains only a backslash command.
+ * If we added a newline to query_buf, and nothing else
+ * has been inserted in query_buf by the lexer, then strip
+ * off the newline again. This avoids any change to
+ * query_buf when a line contains only a backslash
+ * command.
*/
if (query_buf->len == added_nl_pos)
query_buf->data[--query_buf->len] = '\0';
slashCmdStatus = HandleSlashCmds(scan_state,
query_buf->len > 0 ?
- query_buf : previous_buf);
+ query_buf : previous_buf);
success = slashCmdStatus != CMD_ERROR;
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.50 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/print.c,v 1.51 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "common.h"
fprintf(fout, "%s\n", *ptr);
#ifndef __MINGW32__
- /* for some reason MinGW outputs an extra newline, so this supresses it */
+
+ /*
+	 * for some reason MinGW outputs an extra newline, so this suppresses
+ * it
+ */
fputc('\n', fout);
#endif
fputs("\\begin{tabular}{", fout);
if (opt_border == 2)
- fputs("| ", fout);
- for (i = 0; i < col_count; i++)
+ fputs("| ", fout);
+ for (i = 0; i < col_count; i++)
{
- fputc(*(opt_align + i), fout);
- if (opt_border != 0 && i < col_count - 1)
- fputs (" | ", fout);
+ fputc(*(opt_align + i), fout);
+ if (opt_border != 0 && i < col_count - 1)
+ fputs(" | ", fout);
}
if (opt_border == 2)
- fputs(" |", fout);
+ fputs(" |", fout);
fputs("}\n", fout);
{
if (i != 0)
fputs(" & ", fout);
- fputs("\\textit{", fout);
+ fputs("\\textit{", fout);
latex_escaped_print(*ptr, fout);
- fputc('}', fout);
+ fputc('}', fout);
}
}
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/prompt.c,v 1.36 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/prompt.c,v 1.37 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
#include "prompt.h"
break;
}
- case '[':
- case ']':
+ case '[':
+ case ']':
#if defined(USE_READLINE) && defined(RL_PROMPT_START_IGNORE)
- /*
- * readline >=4.0 undocumented feature: non-printing
- * characters in prompt strings must be marked as such,
- * in order to properly display the line during editing.
- */
- buf[0] = '\001';
- buf[1] = (*p == '[') ? RL_PROMPT_START_IGNORE : RL_PROMPT_END_IGNORE;
-#endif /* USE_READLINE */
- break;
+
+ /*
+ * readline >=4.0 undocumented feature: non-printing
+ * characters in prompt strings must be marked as
+ * such, in order to properly display the line during
+ * editing.
+ */
+ buf[0] = '\001';
+ buf[1] = (*p == '[') ? RL_PROMPT_START_IGNORE : RL_PROMPT_END_IGNORE;
+#endif /* USE_READLINE */
+ break;
default:
buf[0] = *p;
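The comment above refers to readline's non-printing-character markers; readline defines RL_PROMPT_START_IGNORE as '\001' and RL_PROMPT_END_IGNORE as '\002', and everything between the two is excluded from its prompt-width calculation. A tiny illustration of how such markers wrap escape sequences (a hypothetical colored prompt, not the one psql builds):

#include <stdio.h>

/*
 * Sketch only: bracket the ANSI color codes with \001 ... \002 so a
 * readline-based program measures just "db=# " when redrawing the line.
 */
int
main(void)
{
	const char *prompt = "\001\033[1;34m\002" "db=# " "\001\033[0m\002";

	/* with readline this string would be passed to readline(prompt) */
	printf("%s\n", prompt);
	return 0;
}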
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/psqlscan.h,v 1.2 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/psqlscan.h,v 1.3 2004/08/29 05:06:54 momjian Exp $
*/
#ifndef PSQLSCAN_H
#define PSQLSCAN_H
extern void psql_scan_destroy(PsqlScanState state);
extern void psql_scan_setup(PsqlScanState state,
- const char *line, int line_len);
+ const char *line, int line_len);
extern void psql_scan_finish(PsqlScanState state);
extern PsqlScanResult psql_scan(PsqlScanState state,
- PQExpBuffer query_buf,
- promptStatus_t *prompt);
+ PQExpBuffer query_buf,
+ promptStatus_t *prompt);
extern void psql_scan_reset(PsqlScanState state);
extern char *psql_scan_slash_command(PsqlScanState state);
extern char *psql_scan_slash_option(PsqlScanState state,
- enum slash_option_type type,
- char *quote,
- bool semicolon);
+ enum slash_option_type type,
+ char *quote,
+ bool semicolon);
extern void psql_scan_slash_command_end(PsqlScanState state);
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.20 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/settings.h,v 1.21 2004/08/29 05:06:54 momjian Exp $
*/
#ifndef SETTINGS_H
#define SETTINGS_H
FILE *cur_cmd_source; /* describe the status of the current main
* loop */
bool cur_cmd_interactive;
- int sversion; /* backend server version */
+ int sversion; /* backend server version */
const char *progname; /* in case you renamed psql */
char *inputfile; /* for error reporting */
unsigned lineno; /* also for error reporting */
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.98 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/startup.c,v 1.99 2004/08/29 05:06:54 momjian Exp $
*/
#include "postgres_fe.h"
PsqlSettings pset;
#define SYSPSQLRC "psqlrc"
-#define PSQLRC ".psqlrc"
+#define PSQLRC ".psqlrc"
/*
* Structures to pass information between the option parsing routine
}
#ifdef WIN32
- setvbuf(stderr,NULL,_IONBF,0);
+ setvbuf(stderr, NULL, _IONBF, 0);
#endif
pset.cur_cmd_source = stdin;
pset.cur_cmd_interactive = false;
"Type: \\copyright for distribution terms\n"
" \\h for help with SQL commands\n"
" \\? for help with psql commands\n"
- " \\g or terminate with semicolon to execute query\n"
+ " \\g or terminate with semicolon to execute query\n"
" \\q to quit\n\n"),
pset.progname, PG_VERSION);
#ifdef USE_SSL
process_psqlrc(char *argv0)
{
char *psqlrc;
- char home[MAXPGPATH];
- char global_file[MAXPGPATH];
- char my_exec_path[MAXPGPATH];
- char etc_path[MAXPGPATH];
+ char home[MAXPGPATH];
+ char global_file[MAXPGPATH];
+ char my_exec_path[MAXPGPATH];
+ char etc_path[MAXPGPATH];
find_my_exec(argv0, my_exec_path);
get_etc_path(my_exec_path, etc_path);
if (access(psqlrc, R_OK) == 0)
process_file(psqlrc);
else if (access(filename, R_OK) == 0)
- process_file(filename);
+ process_file(filename);
free(psqlrc);
}
printf(gettext("SSL connection (cipher: %s, bits: %i)\n\n"),
SSL_get_cipher(ssl), sslbits);
}
-
#endif
static void
checkWin32Codepage(void)
{
- unsigned int wincp, concp;
+ unsigned int wincp,
+ concp;
wincp = GetACP();
concp = GetConsoleCP();
- if (wincp != concp) {
- printf("Warning: Console codepage (%u) differs from windows codepage (%u)\n"
- " 8-bit characters will not work correctly. See PostgreSQL\n"
- " documentation \"Installation on Windows\" for details.\n\n",
- concp, wincp);
+ if (wincp != concp)
+ {
+ printf("Warning: Console codepage (%u) differs from windows codepage (%u)\n"
+ " 8-bit characters will not work correctly. See PostgreSQL\n"
+ " documentation \"Installation on Windows\" for details.\n\n",
+ concp, wincp);
}
}
+
#endif
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.113 2004/08/29 04:13:02 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/psql/tab-complete.c,v 1.114 2004/08/29 05:06:54 momjian Exp $
*/
/*----------------------------------------------------------------------
* "pg_catalog.pg_class c". Note that "pg_namespace n" will be added.
*/
const char *catname;
+
/*
- * Selection condition --- only rows meeting this condition are candidates
- * to display. If catname mentions multiple tables, include the
- * necessary join condition here. For example, "c.relkind = 'r'".
- * Write NULL (not an empty string) if not needed.
+ * Selection condition --- only rows meeting this condition are
+ * candidates to display. If catname mentions multiple tables,
+ * include the necessary join condition here. For example, "c.relkind
+ * = 'r'". Write NULL (not an empty string) if not needed.
*/
const char *selcondition;
+
/*
* Visibility condition --- which rows are visible without schema
- * qualification? For example, "pg_catalog.pg_table_is_visible(c.oid)".
+ * qualification? For example,
+ * "pg_catalog.pg_table_is_visible(c.oid)".
*/
const char *viscondition;
+
/*
- * Namespace --- name of field to join to pg_namespace.oid.
- * For example, "c.relnamespace".
+ * Namespace --- name of field to join to pg_namespace.oid. For
+ * example, "c.relnamespace".
*/
const char *namespace;
+
/*
* Result --- the appropriately-quoted name to return, in the case of
- * an unqualified name. For example, "pg_catalog.quote_ident(c.relname)".
+ * an unqualified name. For example,
+ * "pg_catalog.quote_ident(c.relname)".
*/
const char *result;
+
/*
* In some cases a different result must be used for qualified names.
* Enter that here, or write NULL if result can be used.
* the completion callback functions. Ugly but there is no better way.
*/
static const char *completion_charp; /* to pass a string */
-static const char * const *completion_charpp; /* to pass a list of strings */
-static const char *completion_info_charp; /* to pass a second string */
-static const SchemaQuery *completion_squery; /* to pass a SchemaQuery */
+static const char *const * completion_charpp; /* to pass a list of
+ * strings */
+static const char *completion_info_charp; /* to pass a second string */
+static const SchemaQuery *completion_squery; /* to pass a SchemaQuery */
/* A couple of macros to ease typing. You can use these to complete the given
string with
* restricted to names matching a partially entered name. In these queries,
* %s will be replaced by the text entered so far (suitably escaped to
* become a SQL literal string). %d will be replaced by the length of the
- * string (in unescaped form). A second %s, if present, will be replaced
+ * string (in unescaped form). A second %s, if present, will be replaced
* by a suitably-escaped version of the string provided in
* completion_info_charp.
*
{"GROUP", "SELECT pg_catalog.quote_ident(groname) FROM pg_catalog.pg_group WHERE substring(pg_catalog.quote_ident(groname),1,%d)='%s'"},
{"LANGUAGE", Query_for_list_of_languages},
{"INDEX", NULL, &Query_for_list_of_indexes},
- {"OPERATOR", NULL, NULL}, /* Querying for this is probably
- * not such a good idea. */
+ {"OPERATOR", NULL, NULL}, /* Querying for this is probably not such
+ * a good idea. */
{"RULE", "SELECT pg_catalog.quote_ident(rulename) FROM pg_catalog.pg_rules WHERE substring(pg_catalog.quote_ident(rulename),1,%d)='%s'"},
{"SCHEMA", Query_for_list_of_schemas},
{"SEQUENCE", NULL, &Query_for_list_of_sequences},
{"TABLE", NULL, &Query_for_list_of_tables},
{"TABLESPACE", Query_for_list_of_tablespaces},
- {"TEMP", NULL, NULL}, /* for CREATE TEMP TABLE ... */
+ {"TEMP", NULL, NULL}, /* for CREATE TEMP TABLE ... */
{"TRIGGER", "SELECT pg_catalog.quote_ident(tgname) FROM pg_catalog.pg_trigger WHERE substring(pg_catalog.quote_ident(tgname),1,%d)='%s'"},
{"TYPE", NULL, &Query_for_list_of_datatypes},
- {"UNIQUE", NULL, NULL}, /* for CREATE UNIQUE INDEX ... */
+ {"UNIQUE", NULL, NULL}, /* for CREATE UNIQUE INDEX ... */
{"USER", Query_for_list_of_users},
{"VIEW", NULL, &Query_for_list_of_views},
{NULL, NULL, NULL} /* end of list */
void
initialize_readline(void)
{
- rl_readline_name = (char *)pset.progname;
+ rl_readline_name = (char *) pset.progname;
rl_attempted_completion_function = (void *) psql_completion;
rl_basic_word_break_characters = "\t\n@$><=;|&{( ";
*prev3_wd,
*prev4_wd;
- static const char * const sql_commands[] = {
+ static const char *const sql_commands[] = {
"ABORT", "ALTER", "ANALYZE", "BEGIN", "CHECKPOINT", "CLOSE", "CLUSTER", "COMMENT",
"COMMIT", "COPY", "CREATE", "DEALLOCATE", "DECLARE", "DELETE", "DROP", "END", "EXECUTE",
"EXPLAIN", "FETCH", "GRANT", "INSERT", "LISTEN", "LOAD", "LOCK", "MOVE", "NOTIFY",
- "PREPARE", "REINDEX", "RELEASE", "RESET", "REVOKE", "ROLLBACK", "SAVEPOINT",
- "SELECT", "SET", "SHOW", "START", "TRUNCATE", "UNLISTEN", "UPDATE", "VACUUM", NULL
+ "PREPARE", "REINDEX", "RELEASE", "RESET", "REVOKE", "ROLLBACK", "SAVEPOINT",
+ "SELECT", "SET", "SHOW", "START", "TRUNCATE", "UNLISTEN", "UPDATE", "VACUUM", NULL
};
- static const char * const pgsql_variables[] = {
+ static const char *const pgsql_variables[] = {
/* these SET arguments are known in gram.y */
"CONSTRAINTS",
"NAMES",
NULL
};
- static const char * const backslash_commands[] = {
+ static const char *const backslash_commands[] = {
"\\a", "\\connect", "\\C", "\\cd", "\\copy", "\\copyright",
"\\d", "\\da", "\\db", "\\dc", "\\dC", "\\dd", "\\dD", "\\df",
"\\dg", "\\di", "\\dl", "\\dn", "\\do", "\\dp", "\\ds", "\\dS",
pg_strcasecmp(prev3_wd, "TABLE") != 0)
{
static const char *const list_ALTER[] =
- {"AGGREGATE", "CONVERSATION", "DATABASE","DOMAIN", "FUNCTION",
- "GROUP", "INDEX", "LANGUAGE", "OPERATOR", "SCHEMA", "SEQUENCE", "TABLE",
+ {"AGGREGATE", "CONVERSATION", "DATABASE", "DOMAIN", "FUNCTION",
+ "GROUP", "INDEX", "LANGUAGE", "OPERATOR", "SCHEMA", "SEQUENCE", "TABLE",
"TABLESPACE", "TRIGGER", "TYPE", "USER", NULL};
COMPLETE_WITH_LIST(list_ALTER);
}
-
+
/* ALTER AGGREGATE,CONVERSION,FUNCTION,SCHEMA */
else if (pg_strcasecmp(prev3_wd, "ALTER") == 0 &&
(pg_strcasecmp(prev2_wd, "AGGREGATE") == 0 ||
- pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
- pg_strcasecmp(prev2_wd, "FUNCTION") == 0 ||
- pg_strcasecmp(prev2_wd, "SCHEMA") == 0 ))
+ pg_strcasecmp(prev2_wd, "CONVERSION") == 0 ||
+ pg_strcasecmp(prev2_wd, "FUNCTION") == 0 ||
+ pg_strcasecmp(prev2_wd, "SCHEMA") == 0))
{
static const char *const list_ALTERGEN[] =
{"OWNER TO", "RENAME TO", NULL};
{
static const char *const list_ALTERINDEX[] =
{"SET TABLESPACE", "OWNER TO", "RENAME TO", NULL};
-
+
COMPLETE_WITH_LIST(list_ALTERINDEX);
}
pg_strcasecmp(prev_wd, "COLUMN") == 0)
COMPLETE_WITH_ATTR(prev3_wd);
else if (pg_strcasecmp(prev3_wd, "TABLE") == 0 &&
- pg_strcasecmp(prev_wd, "CLUSTER") == 0)
+ pg_strcasecmp(prev_wd, "CLUSTER") == 0)
COMPLETE_WITH_CONST("ON");
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
- pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev_wd, "ON") == 0)
+ pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
+ pg_strcasecmp(prev_wd, "ON") == 0)
{
completion_info_charp = prev3_wd;
COMPLETE_WITH_QUERY(Query_for_index_of_table);
COMPLETE_WITH_LIST(list_TABLESET);
}
- /* If we have TABLE SET TABLESPACE provide a list of tablespaces*/
+ /* If we have TABLE SET TABLESPACE provide a list of tablespaces */
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
pg_strcasecmp(prev2_wd, "SET") == 0 &&
pg_strcasecmp(prev_wd, "TABLESPACE") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_tablespaces);
- /* If we have TABLE SET WITHOUT provide CLUSTER or OIDS*/
+ /* If we have TABLE SET WITHOUT provide CLUSTER or OIDS */
else if (pg_strcasecmp(prev4_wd, "TABLE") == 0 &&
pg_strcasecmp(prev2_wd, "SET") == 0 &&
pg_strcasecmp(prev_wd, "WITHOUT") == 0)
- {
+ {
static const char *const list_TABLESET2[] =
{"CLUSTER", "OIDS", NULL};
/* BEGIN, END, COMMIT, ABORT */
else if (pg_strcasecmp(prev_wd, "BEGIN") == 0 ||
- pg_strcasecmp(prev_wd, "END") == 0 ||
- pg_strcasecmp(prev_wd, "COMMIT") == 0 ||
- pg_strcasecmp(prev_wd, "ABORT") == 0)
+ pg_strcasecmp(prev_wd, "END") == 0 ||
+ pg_strcasecmp(prev_wd, "COMMIT") == 0 ||
+ pg_strcasecmp(prev_wd, "ABORT") == 0)
{
- static const char * const list_TRANS[] =
+ static const char *const list_TRANS[] =
{"WORK", "TRANSACTION", NULL};
COMPLETE_WITH_LIST(list_TRANS);
}
/* RELEASE SAVEPOINT */
- else if ( pg_strcasecmp(prev_wd, "RELEASE") == 0 )
+ else if (pg_strcasecmp(prev_wd, "RELEASE") == 0)
COMPLETE_WITH_CONST("SAVEPOINT");
/* ROLLBACK*/
- else if ( pg_strcasecmp(prev_wd, "ROLLBACK") == 0 )
+ else if (pg_strcasecmp(prev_wd, "ROLLBACK") == 0)
{
- static const char * const list_TRANS[] =
+ static const char *const list_TRANS[] =
{"WORK", "TRANSACTION", "TO SAVEPOINT", NULL};
COMPLETE_WITH_LIST(list_TRANS);
}
/* CLUSTER */
- /* If the previous word is CLUSTER and not without produce list
- * of indexes. */
+
+ /*
+ * If the previous word is CLUSTER and not without produce list of
+ * indexes.
+ */
else if (pg_strcasecmp(prev_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev2_wd, "WITHOUT") != 0)
+ pg_strcasecmp(prev2_wd, "WITHOUT") != 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
/* If we have CLUSTER , then add "ON" */
- else if (pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
- pg_strcasecmp(prev_wd,"ON") != 0)
+ else if (pg_strcasecmp(prev2_wd, "CLUSTER") == 0 &&
+ pg_strcasecmp(prev_wd, "ON") != 0)
COMPLETE_WITH_CONST("ON");
/*
{
static const char *const list_COMMENT[] =
{"CAST", "CONVERSION", "DATABASE", "INDEX", "LANGUAGE", "RULE", "SCHEMA",
- "SEQUENCE", "TABLE", "TYPE", "VIEW", "COLUMN", "AGGREGATE", "FUNCTION",
- "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", NULL};
+ "SEQUENCE", "TABLE", "TYPE", "VIEW", "COLUMN", "AGGREGATE", "FUNCTION",
+ "OPERATOR", "TRIGGER", "CONSTRAINT", "DOMAIN", NULL};
COMPLETE_WITH_LIST(list_COMMENT);
}
else if (pg_strcasecmp(prev_wd, "FETCH") == 0 ||
pg_strcasecmp(prev_wd, "MOVE") == 0)
{
- static const char * const list_FETCH1[] =
+ static const char *const list_FETCH1[] =
{"ABSOLUT", "BACKWARD", "FORWARD", "RELATIVE", NULL};
COMPLETE_WITH_LIST(list_FETCH1);
else if (pg_strcasecmp(prev2_wd, "FETCH") == 0 ||
pg_strcasecmp(prev2_wd, "MOVE") == 0)
{
- static const char * const list_FETCH2[] =
+ static const char *const list_FETCH2[] =
{"ALL", "NEXT", "PRIOR", NULL};
COMPLETE_WITH_LIST(list_FETCH2);
else if (pg_strcasecmp(prev3_wd, "FETCH") == 0 ||
pg_strcasecmp(prev3_wd, "MOVE") == 0)
{
- static const char * const list_FROMTO[] =
+ static const char *const list_FROMTO[] =
{"FROM", "TO", NULL};
COMPLETE_WITH_LIST(list_FROMTO);
else if (pg_strcasecmp(prev_wd, "GRANT") == 0 ||
pg_strcasecmp(prev_wd, "REVOKE") == 0)
{
- static const char * const list_privileg[] =
+ static const char *const list_privileg[] =
{"SELECT", "INSERT", "UPDATE", "DELETE", "RULE", "REFERENCES",
- "TRIGGER", "CREATE", "TEMPORARY", "EXECUTE", "USAGE", "ALL", NULL};
+ "TRIGGER", "CREATE", "TEMPORARY", "EXECUTE", "USAGE", "ALL", NULL};
COMPLETE_WITH_LIST(list_privileg);
}
else if (pg_strcasecmp(prev3_wd, "INSERT") == 0 &&
pg_strcasecmp(prev2_wd, "INTO") == 0)
{
- static const char * const list_INSERT[] =
+ static const char *const list_INSERT[] =
{"DEFAULT VALUES", "SELECT", "VALUES", NULL};
COMPLETE_WITH_LIST(list_INSERT);
pg_strcasecmp(prev3_wd, "INTO") == 0 &&
prev_wd[strlen(prev_wd) - 1] == ')')
{
- static const char * const list_INSERT[] =
+ static const char *const list_INSERT[] =
{"SELECT", "VALUES", NULL};
COMPLETE_WITH_LIST(list_INSERT);
(pg_strcasecmp(prev3_wd, "TABLE") == 0 &&
pg_strcasecmp(prev4_wd, "LOCK") == 0)))
{
- static const char * const lock_modes[] =
+ static const char *const lock_modes[] =
{"ACCESS SHARE MODE",
- "ROW SHARE MODE", "ROW EXCLUSIVE MODE",
- "SHARE UPDATE EXCLUSIVE MODE", "SHARE MODE",
- "SHARE ROW EXCLUSIVE MODE",
- "EXCLUSIVE MODE", "ACCESS EXCLUSIVE MODE", NULL};
+ "ROW SHARE MODE", "ROW EXCLUSIVE MODE",
+ "SHARE UPDATE EXCLUSIVE MODE", "SHARE MODE",
+ "SHARE ROW EXCLUSIVE MODE",
+ "EXCLUSIVE MODE", "ACCESS EXCLUSIVE MODE", NULL};
COMPLETE_WITH_LIST(lock_modes);
}
COMPLETE_WITH_QUERY("SELECT pg_catalog.quote_ident(relname) FROM pg_catalog.pg_listener WHERE substring(pg_catalog.quote_ident(relname),1,%d)='%s'");
/* OWNER TO - complete with available users*/
else if (pg_strcasecmp(prev2_wd, "OWNER") == 0 &&
- pg_strcasecmp(prev_wd, "TO") == 0)
+ pg_strcasecmp(prev_wd, "TO") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_users);
/* REINDEX */
else if (pg_strcasecmp(prev_wd, "REINDEX") == 0)
{
- static const char * const list_REINDEX[] =
+ static const char *const list_REINDEX[] =
{"TABLE", "DATABASE", "INDEX", NULL};
COMPLETE_WITH_LIST(list_REINDEX);
&& pg_strcasecmp(prev2_wd, "AS") == 0
&& pg_strcasecmp(prev_wd, "TRANSACTION") == 0))
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"ISOLATION", "READ", NULL};
COMPLETE_WITH_LIST(my_list);
|| (pg_strcasecmp(prev4_wd, "CHARACTERISTICS") == 0
&& pg_strcasecmp(prev3_wd, "AS") == 0))
&& (pg_strcasecmp(prev2_wd, "TRANSACTION") == 0
- || pg_strcasecmp(prev2_wd, "WORK") == 0)
+ || pg_strcasecmp(prev2_wd, "WORK") == 0)
&& pg_strcasecmp(prev_wd, "ISOLATION") == 0)
COMPLETE_WITH_CONST("LEVEL");
else if ((pg_strcasecmp(prev4_wd, "SET") == 0
|| pg_strcasecmp(prev4_wd, "START") == 0
|| pg_strcasecmp(prev4_wd, "AS") == 0)
&& (pg_strcasecmp(prev3_wd, "TRANSACTION") == 0
- || pg_strcasecmp(prev3_wd, "WORK") == 0)
+ || pg_strcasecmp(prev3_wd, "WORK") == 0)
&& pg_strcasecmp(prev2_wd, "ISOLATION") == 0
&& pg_strcasecmp(prev_wd, "LEVEL") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"READ", "REPEATABLE", "SERIALIZABLE", NULL};
COMPLETE_WITH_LIST(my_list);
}
else if ((pg_strcasecmp(prev4_wd, "TRANSACTION") == 0 ||
- pg_strcasecmp(prev4_wd, "WORK") == 0) &&
+ pg_strcasecmp(prev4_wd, "WORK") == 0) &&
pg_strcasecmp(prev3_wd, "ISOLATION") == 0 &&
pg_strcasecmp(prev2_wd, "LEVEL") == 0 &&
pg_strcasecmp(prev_wd, "READ") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"UNCOMMITTED", "COMMITTED", NULL};
COMPLETE_WITH_LIST(my_list);
}
- else if ((pg_strcasecmp(prev4_wd, "TRANSACTION") == 0 ||
- pg_strcasecmp(prev4_wd, "WORK") == 0) &&
+ else if ((pg_strcasecmp(prev4_wd, "TRANSACTION") == 0 ||
+ pg_strcasecmp(prev4_wd, "WORK") == 0) &&
pg_strcasecmp(prev3_wd, "ISOLATION") == 0 &&
pg_strcasecmp(prev2_wd, "LEVEL") == 0 &&
pg_strcasecmp(prev_wd, "REPEATABLE") == 0)
COMPLETE_WITH_CONST("READ");
else if ((pg_strcasecmp(prev3_wd, "SET") == 0 ||
- pg_strcasecmp(prev3_wd, "BEGIN") == 0 ||
- pg_strcasecmp(prev3_wd, "START") == 0 ||
- pg_strcasecmp(prev3_wd, "AS") == 0) &&
+ pg_strcasecmp(prev3_wd, "BEGIN") == 0 ||
+ pg_strcasecmp(prev3_wd, "START") == 0 ||
+ pg_strcasecmp(prev3_wd, "AS") == 0) &&
(pg_strcasecmp(prev2_wd, "TRANSACTION") == 0 ||
- pg_strcasecmp(prev2_wd, "WORK") == 0) &&
+ pg_strcasecmp(prev2_wd, "WORK") == 0) &&
pg_strcasecmp(prev_wd, "READ") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"ONLY", "WRITE", NULL};
COMPLETE_WITH_LIST(my_list);
else if (pg_strcasecmp(prev3_wd, "SET") == 0 &&
pg_strcasecmp(prev2_wd, "CONSTRAINTS") == 0)
{
- static const char * const constraint_list[] =
+ static const char *const constraint_list[] =
{"DEFERRED", "IMMEDIATE", NULL};
COMPLETE_WITH_LIST(constraint_list);
else if (pg_strcasecmp(prev2_wd, "SET") == 0 &&
pg_strcasecmp(prev_wd, "SESSION") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"AUTHORIZATION", "CHARACTERISTICS AS TRANSACTION", NULL};
COMPLETE_WITH_LIST(my_list);
COMPLETE_WITH_CONST("TO");
/* Suggest possible variable values */
else if (pg_strcasecmp(prev3_wd, "SET") == 0 &&
- (pg_strcasecmp(prev_wd, "TO") == 0 || strcmp(prev_wd, "=") == 0))
+ (pg_strcasecmp(prev_wd, "TO") == 0 || strcmp(prev_wd, "=") == 0))
{
if (pg_strcasecmp(prev2_wd, "DateStyle") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"ISO", "SQL", "Postgres", "German",
- "YMD", "DMY", "MDY",
- "US", "European", "NonEuropean",
- "DEFAULT", NULL};
+ "YMD", "DMY", "MDY",
+ "US", "European", "NonEuropean",
+ "DEFAULT", NULL};
COMPLETE_WITH_LIST(my_list);
}
else if (pg_strcasecmp(prev2_wd, "GEQO") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"ON", "OFF", "DEFAULT", NULL};
COMPLETE_WITH_LIST(my_list);
}
else
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"DEFAULT", NULL};
COMPLETE_WITH_LIST(my_list);
COMPLETE_WITH_LIST(sql_commands);
else if (strcmp(prev_wd, "\\pset") == 0)
{
- static const char * const my_list[] =
+ static const char *const my_list[] =
{"format", "border", "expanded",
- "null", "fieldsep", "tuples_only", "title", "tableattr", "pager",
- "recordsep", NULL};
+ "null", "fieldsep", "tuples_only", "title", "tableattr", "pager",
+ "recordsep", NULL};
COMPLETE_WITH_LIST(my_list);
}
/* Set up suitably-escaped copies of textual inputs */
if (text)
{
- e_text = pg_malloc(strlen(text) * 2 + 1);
+		e_text = pg_malloc(strlen(text) * 2 + 1);
PQescapeString(e_text, text, strlen(text));
}
else
if (completion_info_charp)
{
- size_t charp_len;
+ size_t charp_len;
charp_len = strlen(completion_info_charp);
e_info_charp = pg_malloc(charp_len * 2 + 1);
appendPQExpBuffer(&query_buffer, "substring(%s,1,%d)='%s'",
completion_squery->result,
string_length, e_text);
+
/*
- * When fetching relation names, suppress system catalogs unless
- * the input-so-far begins with "pg_". This is a compromise
- * between not offering system catalogs for completion at all,
- * and having them swamp the result when the input is just "p".
+ * When fetching relation names, suppress system catalogs
+ * unless the input-so-far begins with "pg_". This is a
+ * compromise between not offering system catalogs for
+ * completion at all, and having them swamp the result when
+ * the input is just "p".
*/
if (strcmp(completion_squery->catname,
"pg_catalog.pg_class c") == 0 &&
- strncmp(text, "pg_", 3) != 0)
+ strncmp(text, "pg_", 3) !=0)
{
appendPQExpBuffer(&query_buffer,
- " AND c.relnamespace <> (SELECT oid FROM"
- " pg_catalog.pg_namespace WHERE nspname = 'pg_catalog')");
+ " AND c.relnamespace <> (SELECT oid FROM"
+ " pg_catalog.pg_namespace WHERE nspname = 'pg_catalog')");
}
/*
- * Add in matching schema names, but only if there is more than
- * one potential match among schema names.
+ * Add in matching schema names, but only if there is more
+ * than one potential match among schema names.
*/
appendPQExpBuffer(&query_buffer, "\nUNION\n"
- "SELECT pg_catalog.quote_ident(n.nspname) || '.' "
+ "SELECT pg_catalog.quote_ident(n.nspname) || '.' "
"FROM pg_catalog.pg_namespace n "
"WHERE substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d)='%s'",
string_length, e_text);
string_length, e_text);
/*
- * Add in matching qualified names, but only if there is exactly
- * one schema matching the input-so-far.
+ * Add in matching qualified names, but only if there is
+ * exactly one schema matching the input-so-far.
*/
appendPQExpBuffer(&query_buffer, "\nUNION\n"
- "SELECT pg_catalog.quote_ident(n.nspname) || '.' || %s "
+ "SELECT pg_catalog.quote_ident(n.nspname) || '.' || %s "
"FROM %s, pg_catalog.pg_namespace n "
"WHERE %s = n.oid AND ",
qualresult,
appendPQExpBuffer(&query_buffer, "substring(pg_catalog.quote_ident(n.nspname) || '.' || %s,1,%d)='%s'",
qualresult,
string_length, e_text);
- /* This condition exploits the single-matching-schema rule to speed up the query */
+
+ /*
+ * This condition exploits the single-matching-schema rule to
+ * speed up the query
+ */
appendPQExpBuffer(&query_buffer,
" AND substring(pg_catalog.quote_ident(n.nspname) || '.',1,%d) ="
" substring('%s',1,pg_catalog.length(pg_catalog.quote_ident(n.nspname))+1)",
*
* Portions Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/bin/scripts/clusterdb.c,v 1.10 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/clusterdb.c,v 1.11 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "dumputils.h"
-static void
-cluster_one_database(const char *dbname, const char *table,
+static void cluster_one_database(const char *dbname, const char *table,
const char *host, const char *port,
const char *username, bool password,
const char *progname, bool echo, bool quiet);
-static void
-cluster_all_databases(const char *host, const char *port,
+static void cluster_all_databases(const char *host, const char *port,
const char *username, bool password,
const char *progname, bool echo, bool quiet);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.10 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/common.c,v 1.11 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return pw->pw_name;
#else
static char username[128]; /* remains after function exit */
- DWORD len = sizeof(username)-1;
+ DWORD len = sizeof(username) - 1;
if (!GetUserName(username, &len))
{
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/createlang.c,v 1.13 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/createlang.c,v 1.14 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
PQclear(result);
}
else
- validatorexists = true; /* don't try to create it */
+ validatorexists = true; /* don't try to create it */
/*
* Create the function(s) and the language
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.12 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/droplang.c,v 1.13 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
conn = connectDatabase(dbname, host, port, username, password, progname);
/*
- * Make sure the language is installed and find the OIDs of the handler
- * and validator functions
+ * Make sure the language is installed and find the OIDs of the
+ * handler and validator functions
*/
printfPQExpBuffer(&sql, "SELECT lanplcallfoid, lanvalidator FROM pg_language WHERE lanname = '%s' AND lanispl;", langname);
result = executeQuery(conn, sql.data, progname, echo);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/bin/scripts/vacuumdb.c,v 1.10 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/bin/scripts/vacuumdb.c,v 1.11 2004/08/29 05:06:54 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "common.h"
-static void
-vacuum_one_database(const char *dbname, bool full, bool verbose, bool analyze,
+static void vacuum_one_database(const char *dbname, bool full, bool verbose, bool analyze,
const char *table,
const char *host, const char *port,
const char *username, bool password,
const char *progname, bool echo, bool quiet);
-static void
-vacuum_all_databases(bool full, bool verbose, bool analyze,
+static void vacuum_all_databases(bool full, bool verbose, bool analyze,
const char *host, const char *port,
const char *username, bool password,
const char *progname, bool echo, bool quiet);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.41 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/gist.h,v 1.42 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* methods union andpick split takes it as one of args
*/
-typedef struct {
- int32 n; /* number of elements */
+typedef struct
+{
+ int32 n; /* number of elements */
GISTENTRY vector[1];
} GistEntryVector;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.57 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/hash.h,v 1.58 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* modeled after Margo Seltzer's hash implementation for unix.
Bucket hasho_bucket; /* bucket number this pg belongs to */
uint16 hasho_flag; /* page type code, see above */
uint16 hasho_filler; /* available for future use */
+
/*
* We presently set hasho_filler to HASHO_FILL (0x1234); this is for
* the convenience of pg_filedump, which otherwise would have a hard
typedef struct HashScanOpaqueData
{
/*
- * By definition, a hash scan should be examining only one bucket.
- * We record the bucket number here as soon as it is known.
+ * By definition, a hash scan should be examining only one bucket. We
+ * record the bucket number here as soon as it is known.
*/
Bucket hashso_bucket;
bool hashso_bucket_valid;
+
/*
* If we have a share lock on the bucket, we record it here. When
* hashso_bucket_blkno is zero, we have no such lock.
*/
- BlockNumber hashso_bucket_blkno;
+ BlockNumber hashso_bucket_blkno;
+
/*
- * We also want to remember which buffers we're currently examining in the
- * scan. We keep these buffers pinned (but not locked) across hashgettuple
- * calls, in order to avoid doing a ReadBuffer() for every tuple in the
- * index.
+ * We also want to remember which buffers we're currently examining in
+ * the scan. We keep these buffers pinned (but not locked) across
+ * hashgettuple calls, in order to avoid doing a ReadBuffer() for
+ * every tuple in the index.
*/
Buffer hashso_curbuf;
Buffer hashso_mrkbuf;
uint32 hashm_firstfree; /* lowest-number free ovflpage (bit#) */
uint32 hashm_nmaps; /* number of bitmap pages */
RegProcedure hashm_procid; /* hash procedure id from pg_proc */
- uint32 hashm_spares[HASH_MAX_SPLITPOINTS]; /* spare pages before
- * each splitpoint */
+ uint32 hashm_spares[HASH_MAX_SPLITPOINTS]; /* spare pages before
+ * each splitpoint */
BlockNumber hashm_mapp[HASH_MAX_BITMAPS]; /* blknos of ovfl bitmaps */
} HashMetaPageData;
extern Buffer _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf);
extern BlockNumber _hash_freeovflpage(Relation rel, Buffer ovflbuf);
extern void _hash_initbitmap(Relation rel, HashMetaPage metap,
- BlockNumber blkno);
+ BlockNumber blkno);
extern void _hash_squeezebucket(Relation rel,
- Bucket bucket, BlockNumber bucket_blkno);
+ Bucket bucket, BlockNumber bucket_blkno);
/* hashpage.c */
extern void _hash_getlock(Relation rel, BlockNumber whichlock, int access);
extern HashItem _hash_formitem(IndexTuple itup);
extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
- uint32 highmask, uint32 lowmask);
+ uint32 highmask, uint32 lowmask);
extern uint32 _hash_log2(uint32 num);
extern void _hash_checkpage(Relation rel, Page page, int flags);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.91 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/heapam.h,v 1.92 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
)
extern Datum heap_getsysattr(HeapTuple tup, int attnum, TupleDesc tupleDesc,
- bool *isnull);
+ bool *isnull);
/* ----------------
HeapTuple tuple, Buffer *userbuf, bool keep_buf,
PgStat_Info *pgstat_info);
extern bool heap_release_fetch(Relation relation, Snapshot snapshot,
- HeapTuple tuple, Buffer *userbuf, bool keep_buf,
- PgStat_Info *pgstat_info);
+ HeapTuple tuple, Buffer *userbuf, bool keep_buf,
+ PgStat_Info *pgstat_info);
extern ItemPointer heap_get_latest_tid(Relation relation, Snapshot snapshot,
ItemPointer tid);
extern int heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid,
CommandId cid, Snapshot crosscheck, bool wait);
extern int heap_update(Relation relation, ItemPointer otid, HeapTuple tup,
- ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait);
+ ItemPointer ctid, CommandId cid, Snapshot crosscheck, bool wait);
extern int heap_mark4update(Relation relation, HeapTuple tup,
Buffer *userbuf, CommandId cid);
extern HeapTuple heap_formtuple(TupleDesc tupleDescriptor,
Datum *values, char *nulls);
extern HeapTuple heap_modifytuple(HeapTuple tuple,
- Relation relation,
- Datum *replValues,
- char *replNulls,
- char *replActions);
+ Relation relation,
+ Datum *replValues,
+ char *replNulls,
+ char *replActions);
extern void heap_deformtuple(HeapTuple tuple, TupleDesc tupleDesc,
- Datum *values, char *nulls);
+ Datum *values, char *nulls);
extern void heap_freetuple(HeapTuple tuple);
extern HeapTuple heap_addheader(int natts, bool withoid, Size structlen, void *structure);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.69 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/htup.h,v 1.70 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int32 datum_typmod; /* -1, or identifier of a record type */
Oid datum_typeid; /* composite type OID, or RECORDOID */
+
/*
* Note: field ordering is chosen with thought that Oid might someday
* widen to 64 bits.
{
union
{
- HeapTupleFields t_heap;
- DatumTupleFields t_datum;
+ HeapTupleFields t_heap;
+ DatumTupleFields t_datum;
} t_choice;
ItemPointerData t_ctid; /* current TID of this or newer tuple */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.81 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/nbtree.h,v 1.82 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Buffer _bt_gettrueroot(Relation rel);
extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
- BlockNumber blkno, int access);
+ BlockNumber blkno, int access);
extern void _bt_relbuf(Relation rel, Buffer buf);
extern void _bt_wrtbuf(Relation rel, Buffer buf);
extern void _bt_wrtnorelbuf(Relation rel, Buffer buf);
* prototypes for functions in nbtsearch.c
*/
extern BTStack _bt_search(Relation rel,
- int keysz, ScanKey scankey, bool nextkey,
- Buffer *bufP, int access);
+ int keysz, ScanKey scankey, bool nextkey,
+ Buffer *bufP, int access);
extern Buffer _bt_moveright(Relation rel, Buffer buf, int keysz,
ScanKey scankey, bool nextkey, int access);
extern OffsetNumber _bt_binsrch(Relation rel, Buffer buf, int keysz,
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.26 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/skey.h,v 1.27 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* A ScanKey represents the application of a comparison operator between
- * a table or index column and a constant. When it's part of an array of
+ * a table or index column and a constant. When it's part of an array of
* ScanKeys, the comparison conditions are implicitly ANDed. The index
* column is the left argument of the operator, if it's a binary operator.
* (The data structure can support unary indexable operators too; in that
{
int sk_flags; /* flags, see below */
AttrNumber sk_attno; /* table or index column number */
- StrategyNumber sk_strategy; /* operator strategy number */
+ StrategyNumber sk_strategy; /* operator strategy number */
Oid sk_subtype; /* strategy subtype */
FmgrInfo sk_func; /* lookup info for function to call */
Datum sk_argument; /* data to compare */
* prototypes for functions in access/common/scankey.c
*/
extern void ScanKeyInit(ScanKey entry,
- AttrNumber attributeNumber,
- StrategyNumber strategy,
- RegProcedure procedure,
- Datum argument);
+ AttrNumber attributeNumber,
+ StrategyNumber strategy,
+ RegProcedure procedure,
+ Datum argument);
extern void ScanKeyEntryInitialize(ScanKey entry,
- int flags,
- AttrNumber attributeNumber,
- StrategyNumber strategy,
- Oid subtype,
- RegProcedure procedure,
- Datum argument);
+ int flags,
+ AttrNumber attributeNumber,
+ StrategyNumber strategy,
+ Oid subtype,
+ RegProcedure procedure,
+ Datum argument);
extern void ScanKeyEntryInitializeWithInfo(ScanKey entry,
- int flags,
- AttrNumber attributeNumber,
- StrategyNumber strategy,
- Oid subtype,
- FmgrInfo *finfo,
- Datum argument);
+ int flags,
+ AttrNumber attributeNumber,
+ StrategyNumber strategy,
+ Oid subtype,
+ FmgrInfo *finfo,
+ Datum argument);
#endif /* SKEY_H */
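A minimal usage sketch of the ScanKeyInit() declared above, assuming two hypothetical int4 columns (attnum_a, attnum_b); keys placed in one array are implicitly ANDed, as the comment at the top of this file notes.

#include "postgres.h"
#include "access/heapam.h"
#include "access/nbtree.h"		/* BTEqualStrategyNumber */
#include "access/skey.h"
#include "utils/fmgroids.h"		/* F_INT4EQ */
#include "utils/tqual.h"		/* SnapshotNow */

/* Hypothetical helper: scan rel for rows where col_a = 42 AND col_b = 7. */
static HeapScanDesc
begin_two_key_scan(Relation rel, AttrNumber attnum_a, AttrNumber attnum_b)
{
	ScanKeyData skey[2];

	ScanKeyInit(&skey[0], attnum_a, BTEqualStrategyNumber,
				F_INT4EQ, Int32GetDatum(42));
	ScanKeyInit(&skey[1], attnum_b, BTEqualStrategyNumber,
				F_INT4EQ, Int32GetDatum(7));

	/* heap_beginscan copies the keys, so a local array is fine */
	return heap_beginscan(rel, SnapshotNow, 2, skey);
}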
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.9 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/slru.h,v 1.10 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
- * Number of page buffers. Ideally this could be different for CLOG and
+ * Number of page buffers. Ideally this could be different for CLOG and
* SUBTRANS, but the benefit doesn't seem to be worth any additional
* notational cruft.
*/
SlruShared shared;
/*
- * This flag tells whether to fsync writes (true for pg_clog,
- * false for pg_subtrans).
+ * This flag tells whether to fsync writes (true for pg_clog, false
+ * for pg_subtrans).
*/
bool do_fsync;
/*
- * Decide which of two page numbers is "older" for truncation purposes.
- * We need to use comparison of TransactionIds here in order to do the
- * right thing with wraparound XID arithmetic.
+ * Decide which of two page numbers is "older" for truncation
+ * purposes. We need to use comparison of TransactionIds here in order
+ * to do the right thing with wraparound XID arithmetic.
*/
bool (*PagePrecedes) (int, int);
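A sketch of what such a callback typically looks like, modeled on pg_clog's: map each page number back to an XID that lives on that page and let the wraparound-aware TransactionIdPrecedes() decide. XACTS_PER_PAGE here is an illustrative constant, not the real clog.c value.

#include "postgres.h"
#include "access/transam.h"		/* TransactionIdPrecedes, FirstNormalTransactionId */

#define XACTS_PER_PAGE	2048	/* placeholder; each SLRU user has its own ratio */

static bool
ExamplePagePrecedes(int page1, int page2)
{
	TransactionId xid1 = ((TransactionId) page1) * XACTS_PER_PAGE + FirstNormalTransactionId;
	TransactionId xid2 = ((TransactionId) page2) * XACTS_PER_PAGE + FirstNormalTransactionId;

	/* modular XID comparison does the right thing across wraparound */
	return TransactionIdPrecedes(xid1, xid2);
}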
extern int SimpleLruShmemSize(void);
extern void SimpleLruInit(SlruCtl ctl, const char *name,
- LWLockId ctllock, const char *subdir);
+ LWLockId ctllock, const char *subdir);
extern int SimpleLruZeroPage(SlruCtl ctl, int pageno);
extern int SimpleLruReadPage(SlruCtl ctl, int pageno, TransactionId xid);
extern void SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/tupdesc.h,v 1.44 2004/08/29 04:13:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/tupdesc.h,v 1.45 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* TupleDesc; with the exception that tdhasoid indicates if OID is present.
*
* If the tuple is known to correspond to a named rowtype (such as a table's
- * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
+ * rowtype) then tdtypeid identifies that type and tdtypmod is -1. Otherwise
* tdtypeid is RECORDOID, and tdtypmod can be either -1 for a fully anonymous
* row type, or a value >= 0 to allow the rowtype to be looked up in the
* typcache.c type cache.
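As a sketch of that lookup from the caller's side (the helper name is hypothetical): a composite Datum carries the type OID and typmod in its header, and the typcache turns that pair back into a TupleDesc.

#include "postgres.h"
#include "fmgr.h"				/* DatumGetHeapTupleHeader */
#include "access/htup.h"
#include "utils/typcache.h"

static TupleDesc
rowtype_of_datum(Datum d)
{
	HeapTupleHeader td = DatumGetHeapTupleHeader(d);

	/* named rowtypes come back with typmod -1; anonymous RECORD datums use typmod >= 0 */
	return lookup_rowtype_tupdesc(HeapTupleHeaderGetTypeId(td),
								  HeapTupleHeaderGetTypMod(td));
}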
*
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.19 2004/08/29 04:13:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/tuptoaster.h,v 1.20 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If a Datum is of composite type, "flatten" it to contain no toasted fields.
* This must be invoked on any potentially-composite field that is to be
- * inserted into a tuple. Doing this preserves the invariant that toasting
+ * inserted into a tuple. Doing this preserves the invariant that toasting
* goes only one level deep in a tuple.
* ----------
*/
extern Datum toast_flatten_tuple_attribute(Datum value,
- Oid typeId, int32 typeMod);
+ Oid typeId, int32 typeMod);
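A small usage sketch (the wrapper is hypothetical): a caller building a tuple passes each potentially-composite value through the flattener before storing it; for non-composite types the call returns the value unchanged.

#include "postgres.h"
#include "access/tuptoaster.h"
#include "catalog/pg_attribute.h"

static Datum
flatten_attr(Datum value, Form_pg_attribute att)
{
	/* strips any nested toast references so toasting stays one level deep */
	return toast_flatten_tuple_attribute(value, att->atttypid, att->atttypmod);
}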
/* ----------
* toast_compress_datum -
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.70 2004/08/29 04:13:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/xact.h,v 1.71 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int nrels; /* number of RelFileNodes */
int nsubxacts; /* number of subtransaction XIDs */
/* Array of RelFileNode(s) to drop at commit */
- RelFileNode xnodes[1]; /* VARIABLE LENGTH ARRAY */
+ RelFileNode xnodes[1]; /* VARIABLE LENGTH ARRAY */
/* ARRAY OF COMMITTED SUBTRANSACTION XIDs FOLLOWS */
} xl_xact_commit;
-#define MinSizeOfXactCommit offsetof(xl_xact_commit, xnodes)
+#define MinSizeOfXactCommit offsetof(xl_xact_commit, xnodes)
typedef struct xl_xact_abort
{
int nrels; /* number of RelFileNodes */
int nsubxacts; /* number of subtransaction XIDs */
/* Array of RelFileNode(s) to drop at abort */
- RelFileNode xnodes[1]; /* VARIABLE LENGTH ARRAY */
+ RelFileNode xnodes[1]; /* VARIABLE LENGTH ARRAY */
/* ARRAY OF ABORTED SUBTRANSACTION XIDs FOLLOWS */
} xl_xact_abort;
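Since both records end in a variable-length RelFileNode array followed by the subtransaction XIDs, a reader locates the XIDs by indexing past xnodes, in the spirit of the redo code in xact.c (the helper name below is hypothetical).

#include "postgres.h"
#include "access/xact.h"
#include "access/xlog.h"

static TransactionId *
commit_record_subxids(XLogRecord *record)
{
	xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);

	/* xnodes[] holds nrels entries; the nsubxacts XIDs sit right after it */
	return (TransactionId *) &xlrec->xnodes[xlrec->nrels];
}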
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.56 2004/08/29 04:13:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog.h,v 1.57 2004/08/29 05:06:55 momjian Exp $
*/
#ifndef XLOG_H
#define XLOG_H
struct XLogRecData *next;
} XLogRecData;
-extern TimeLineID ThisTimeLineID; /* current TLI */
+extern TimeLineID ThisTimeLineID; /* current TLI */
extern bool InRecovery;
extern XLogRecPtr MyLastRecPtr;
extern bool MyXactMadeXLogEntry;
#define XLogArchivingActive() (XLogArchiveCommand[0] != '\0')
#ifdef WAL_DEBUG
-extern bool XLOG_DEBUG;
+extern bool XLOG_DEBUG;
#endif
extern XLogRecPtr XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata);
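The usual calling pattern, as a sketch: fill one or more XLogRecData entries, chain them through next, and hand the head to XLogInsert(). The helper below covers the simple one-chunk, no-buffer case and is illustrative only.

#include "postgres.h"
#include "access/rmgr.h"
#include "access/xlog.h"
#include "storage/buf.h"

static XLogRecPtr
log_single_chunk(RmgrId rmid, uint8 info, char *data, uint32 len)
{
	XLogRecData rdata;

	rdata.buffer = InvalidBuffer;	/* data is not tied to a shared buffer */
	rdata.data = data;
	rdata.len = len;
	rdata.next = NULL;				/* a longer chain would continue here */

	return XLogInsert(rmid, info, &rdata);
}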
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.3 2004/08/29 04:13:04 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/access/xlog_internal.h,v 1.4 2004/08/29 05:06:55 momjian Exp $
*/
#ifndef XLOG_INTERNAL_H
#define XLOG_INTERNAL_H
/*
* When the XLP_LONG_HEADER flag is set, we store additional fields in the
* page header. (This is ordinarily done just in the first page of an
- * XLOG file.) The additional fields serve to identify the file accurately.
+ * XLOG file.) The additional fields serve to identify the file accurately.
*/
typedef struct XLogLongPageHeaderData
{
#define StatusFilePath(path, xlog, suffix) \
snprintf(path, MAXPGPATH, "%s/archive_status/%s%s", XLogDir, xlog, suffix)
-#define BackupHistoryFileName(fname, tli, log, seg, offset) \
+#define BackupHistoryFileName(fname, tli, log, seg, offset) \
snprintf(fname, MAXFNAMELEN, "%08X%08X%08X.%08X.backup", tli, log, seg, offset)
#define BackupHistoryFilePath(path, tli, log, seg, offset) \
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/c.h,v 1.167 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/c.h,v 1.168 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef unsigned long long int uint64;
#endif
-#else /* not HAVE_LONG_INT_64 and not HAVE_LONG_LONG_INT_64 */
+#else /* not HAVE_LONG_INT_64 and not
+ * HAVE_LONG_LONG_INT_64 */
/* Won't actually work, but fall back to long int so that code compiles */
#ifndef HAVE_INT64
#endif
#define INT64_IS_BUSTED
-#endif /* not HAVE_LONG_INT_64 and not HAVE_LONG_LONG_INT_64 */
+#endif /* not HAVE_LONG_INT_64 and not
+ * HAVE_LONG_LONG_INT_64 */
/* Decide if we need to decorate 64-bit constants */
#ifdef HAVE_LL_CONSTANTS
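For reference, the decoration this test gates is along the lines of the following (a sketch; see the real definitions further down in c.h):

/* with LL constants available, 64-bit literals can be written portably as: */
#define INT64CONST(x)  ((int64) x##LL)
/* without them, a bare cast has to do, which only works when int64 fits in long */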
/*
* NOTE: this is also used for opening text files.
- * WIN32 treats Control-Z as EOF in files opened in text mode.
- * Therefore, we open files in binary mode on Win32 so we can read
- * literal control-Z. The other affect is that we see CRLF, but
- * that is OK because we can already handle those cleanly.
+ * WIN32 treats Control-Z as EOF in files opened in text mode.
+ * Therefore, we open files in binary mode on Win32 so we can read
+ * literal control-Z.  The other effect is that we see CRLF, but
+ * that is OK because we can already handle those cleanly.
*/
#if defined(__CYGWIN__) || defined(WIN32)
#define PG_BINARY O_BINARY
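Typical use, as a sketch (the helper is hypothetical): OR PG_BINARY into every open() flags argument so text-mode translation never kicks in on Win32, while remaining a no-op elsewhere.

#include "c.h"
#include <fcntl.h>

static int
open_datafile(const char *path)
{
	/* PG_BINARY is O_BINARY on Win32/Cygwin and 0 on other platforms */
	return open(path, O_RDONLY | PG_BINARY, 0);
}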
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.32 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/namespace.h,v 1.33 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void InitializeSearchPath(void);
extern void AtEOXact_Namespace(bool isCommit);
extern void AtEOSubXact_Namespace(bool isCommit, TransactionId myXid,
- TransactionId parentXid);
+ TransactionId parentXid);
/* stuff for search_path GUC variable */
extern char *namespace_search_path;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.45 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_aggregate.h,v 1.46 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert ( 2159 numeric_accum numeric_stddev 1231 "{0,0,0}" ));
/* boolean-and and boolean-or */
-DATA(insert ( 2517 booland_statefunc - 16 _null_ ));
-DATA(insert ( 2518 boolor_statefunc - 16 _null_ ));
-DATA(insert ( 2519 booland_statefunc - 16 _null_ ));
+DATA(insert ( 2517 booland_statefunc - 16 _null_ ));
+DATA(insert ( 2518 boolor_statefunc - 16 _null_ ));
+DATA(insert ( 2519 booland_statefunc - 16 _null_ ));
/* bitwise integer */
-DATA(insert ( 2236 int2and - 21 _null_ ));
-DATA(insert ( 2237 int2or - 21 _null_ ));
-DATA(insert ( 2238 int4and - 23 _null_ ));
-DATA(insert ( 2239 int4or - 23 _null_ ));
-DATA(insert ( 2240 int8and - 20 _null_ ));
-DATA(insert ( 2241 int8or - 20 _null_ ));
-DATA(insert ( 2242 bitand - 1560 _null_ ));
-DATA(insert ( 2243 bitor - 1560 _null_ ));
+DATA(insert ( 2236 int2and - 21 _null_ ));
+DATA(insert ( 2237 int2or - 21 _null_ ));
+DATA(insert ( 2238 int4and - 23 _null_ ));
+DATA(insert ( 2239 int4or - 23 _null_ ));
+DATA(insert ( 2240 int8and - 20 _null_ ));
+DATA(insert ( 2241 int8or - 20 _null_ ));
+DATA(insert ( 2242 bitand - 1560 _null_ ));
+DATA(insert ( 2243 bitor - 1560 _null_ ));
/*
* prototypes for functions in pg_aggregate.c
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.60 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amop.h,v 1.61 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* rtree box_ops
*/
-DATA(insert ( 425 0 1 f 493 ));
-DATA(insert ( 425 0 2 f 494 ));
-DATA(insert ( 425 0 3 f 500 ));
-DATA(insert ( 425 0 4 f 495 ));
-DATA(insert ( 425 0 5 f 496 ));
-DATA(insert ( 425 0 6 f 499 ));
-DATA(insert ( 425 0 7 f 498 ));
-DATA(insert ( 425 0 8 f 497 ));
+DATA(insert ( 425 0 1 f 493 ));
+DATA(insert ( 425 0 2 f 494 ));
+DATA(insert ( 425 0 3 f 500 ));
+DATA(insert ( 425 0 4 f 495 ));
+DATA(insert ( 425 0 5 f 496 ));
+DATA(insert ( 425 0 6 f 499 ));
+DATA(insert ( 425 0 7 f 498 ));
+DATA(insert ( 425 0 8 f 497 ));
/*
* rtree poly_ops (supports polygons)
*/
-DATA(insert ( 1993 0 1 f 485 ));
-DATA(insert ( 1993 0 2 f 486 ));
-DATA(insert ( 1993 0 3 f 492 ));
-DATA(insert ( 1993 0 4 f 487 ));
-DATA(insert ( 1993 0 5 f 488 ));
-DATA(insert ( 1993 0 6 f 491 ));
-DATA(insert ( 1993 0 7 f 490 ));
-DATA(insert ( 1993 0 8 f 489 ));
+DATA(insert ( 1993 0 1 f 485 ));
+DATA(insert ( 1993 0 2 f 486 ));
+DATA(insert ( 1993 0 3 f 492 ));
+DATA(insert ( 1993 0 4 f 487 ));
+DATA(insert ( 1993 0 5 f 488 ));
+DATA(insert ( 1993 0 6 f 491 ));
+DATA(insert ( 1993 0 7 f 490 ));
+DATA(insert ( 1993 0 8 f 489 ));
/*
* btree int2_ops
*/
-DATA(insert ( 1976 0 1 f 95 ));
-DATA(insert ( 1976 0 2 f 522 ));
-DATA(insert ( 1976 0 3 f 94 ));
-DATA(insert ( 1976 0 4 f 524 ));
-DATA(insert ( 1976 0 5 f 520 ));
+DATA(insert ( 1976 0 1 f 95 ));
+DATA(insert ( 1976 0 2 f 522 ));
+DATA(insert ( 1976 0 3 f 94 ));
+DATA(insert ( 1976 0 4 f 524 ));
+DATA(insert ( 1976 0 5 f 520 ));
/* crosstype operators int24 */
DATA(insert ( 1976 23 1 f 534 ));
DATA(insert ( 1976 23 2 f 540 ));
* btree int4_ops
*/
-DATA(insert ( 1978 0 1 f 97 ));
-DATA(insert ( 1978 0 2 f 523 ));
-DATA(insert ( 1978 0 3 f 96 ));
-DATA(insert ( 1978 0 4 f 525 ));
-DATA(insert ( 1978 0 5 f 521 ));
+DATA(insert ( 1978 0 1 f 97 ));
+DATA(insert ( 1978 0 2 f 523 ));
+DATA(insert ( 1978 0 3 f 96 ));
+DATA(insert ( 1978 0 4 f 525 ));
+DATA(insert ( 1978 0 5 f 521 ));
/* crosstype operators int42 */
DATA(insert ( 1978 21 1 f 535 ));
DATA(insert ( 1978 21 2 f 541 ));
DATA(insert ( 1978 21 4 f 543 ));
DATA(insert ( 1978 21 5 f 537 ));
/* crosstype operators int48 */
-DATA(insert ( 1978 20 1 f 37 ));
-DATA(insert ( 1978 20 2 f 80 ));
-DATA(insert ( 1978 20 3 f 15 ));
-DATA(insert ( 1978 20 4 f 82 ));
-DATA(insert ( 1978 20 5 f 76 ));
+DATA(insert ( 1978 20 1 f 37 ));
+DATA(insert ( 1978 20 2 f 80 ));
+DATA(insert ( 1978 20 3 f 15 ));
+DATA(insert ( 1978 20 4 f 82 ));
+DATA(insert ( 1978 20 5 f 76 ));
/*
* btree int8_ops
*/
-DATA(insert ( 1980 0 1 f 412 ));
-DATA(insert ( 1980 0 2 f 414 ));
-DATA(insert ( 1980 0 3 f 410 ));
-DATA(insert ( 1980 0 4 f 415 ));
-DATA(insert ( 1980 0 5 f 413 ));
+DATA(insert ( 1980 0 1 f 412 ));
+DATA(insert ( 1980 0 2 f 414 ));
+DATA(insert ( 1980 0 3 f 410 ));
+DATA(insert ( 1980 0 4 f 415 ));
+DATA(insert ( 1980 0 5 f 413 ));
/* crosstype operators int82 */
DATA(insert ( 1980 21 1 f 1870 ));
DATA(insert ( 1980 21 2 f 1872 ));
* btree oid_ops
*/
-DATA(insert ( 1989 0 1 f 609 ));
-DATA(insert ( 1989 0 2 f 611 ));
-DATA(insert ( 1989 0 3 f 607 ));
-DATA(insert ( 1989 0 4 f 612 ));
-DATA(insert ( 1989 0 5 f 610 ));
+DATA(insert ( 1989 0 1 f 609 ));
+DATA(insert ( 1989 0 2 f 611 ));
+DATA(insert ( 1989 0 3 f 607 ));
+DATA(insert ( 1989 0 4 f 612 ));
+DATA(insert ( 1989 0 5 f 610 ));
/*
* btree oidvector_ops
*/
-DATA(insert ( 1991 0 1 f 645 ));
-DATA(insert ( 1991 0 2 f 647 ));
-DATA(insert ( 1991 0 3 f 649 ));
-DATA(insert ( 1991 0 4 f 648 ));
-DATA(insert ( 1991 0 5 f 646 ));
+DATA(insert ( 1991 0 1 f 645 ));
+DATA(insert ( 1991 0 2 f 647 ));
+DATA(insert ( 1991 0 3 f 649 ));
+DATA(insert ( 1991 0 4 f 648 ));
+DATA(insert ( 1991 0 5 f 646 ));
/*
* btree float4_ops
*/
-DATA(insert ( 1970 0 1 f 622 ));
-DATA(insert ( 1970 0 2 f 624 ));
-DATA(insert ( 1970 0 3 f 620 ));
-DATA(insert ( 1970 0 4 f 625 ));
-DATA(insert ( 1970 0 5 f 623 ));
+DATA(insert ( 1970 0 1 f 622 ));
+DATA(insert ( 1970 0 2 f 624 ));
+DATA(insert ( 1970 0 3 f 620 ));
+DATA(insert ( 1970 0 4 f 625 ));
+DATA(insert ( 1970 0 5 f 623 ));
/* crosstype operators float48 */
DATA(insert ( 1970 701 1 f 1122 ));
DATA(insert ( 1970 701 2 f 1124 ));
* btree float8_ops
*/
-DATA(insert ( 1972 0 1 f 672 ));
-DATA(insert ( 1972 0 2 f 673 ));
-DATA(insert ( 1972 0 3 f 670 ));
-DATA(insert ( 1972 0 4 f 675 ));
-DATA(insert ( 1972 0 5 f 674 ));
+DATA(insert ( 1972 0 1 f 672 ));
+DATA(insert ( 1972 0 2 f 673 ));
+DATA(insert ( 1972 0 3 f 670 ));
+DATA(insert ( 1972 0 4 f 675 ));
+DATA(insert ( 1972 0 5 f 674 ));
/* crosstype operators float84 */
DATA(insert ( 1972 700 1 f 1132 ));
DATA(insert ( 1972 700 2 f 1134 ));
* btree char_ops
*/
-DATA(insert ( 429 0 1 f 631 ));
-DATA(insert ( 429 0 2 f 632 ));
-DATA(insert ( 429 0 3 f 92 ));
-DATA(insert ( 429 0 4 f 634 ));
-DATA(insert ( 429 0 5 f 633 ));
+DATA(insert ( 429 0 1 f 631 ));
+DATA(insert ( 429 0 2 f 632 ));
+DATA(insert ( 429 0 3 f 92 ));
+DATA(insert ( 429 0 4 f 634 ));
+DATA(insert ( 429 0 5 f 633 ));
/*
* btree name_ops
*/
-DATA(insert ( 1986 0 1 f 660 ));
-DATA(insert ( 1986 0 2 f 661 ));
-DATA(insert ( 1986 0 3 f 93 ));
-DATA(insert ( 1986 0 4 f 663 ));
-DATA(insert ( 1986 0 5 f 662 ));
+DATA(insert ( 1986 0 1 f 660 ));
+DATA(insert ( 1986 0 2 f 661 ));
+DATA(insert ( 1986 0 3 f 93 ));
+DATA(insert ( 1986 0 4 f 663 ));
+DATA(insert ( 1986 0 5 f 662 ));
/*
* btree text_ops
*/
-DATA(insert ( 1994 0 1 f 664 ));
-DATA(insert ( 1994 0 2 f 665 ));
-DATA(insert ( 1994 0 3 f 98 ));
-DATA(insert ( 1994 0 4 f 667 ));
-DATA(insert ( 1994 0 5 f 666 ));
+DATA(insert ( 1994 0 1 f 664 ));
+DATA(insert ( 1994 0 2 f 665 ));
+DATA(insert ( 1994 0 3 f 98 ));
+DATA(insert ( 1994 0 4 f 667 ));
+DATA(insert ( 1994 0 5 f 666 ));
/*
* btree bpchar_ops
*/
-DATA(insert ( 426 0 1 f 1058 ));
-DATA(insert ( 426 0 2 f 1059 ));
-DATA(insert ( 426 0 3 f 1054 ));
-DATA(insert ( 426 0 4 f 1061 ));
-DATA(insert ( 426 0 5 f 1060 ));
+DATA(insert ( 426 0 1 f 1058 ));
+DATA(insert ( 426 0 2 f 1059 ));
+DATA(insert ( 426 0 3 f 1054 ));
+DATA(insert ( 426 0 4 f 1061 ));
+DATA(insert ( 426 0 5 f 1060 ));
/*
* btree varchar_ops (same operators as text_ops)
*/
-DATA(insert ( 2003 0 1 f 664 ));
-DATA(insert ( 2003 0 2 f 665 ));
-DATA(insert ( 2003 0 3 f 98 ));
-DATA(insert ( 2003 0 4 f 667 ));
-DATA(insert ( 2003 0 5 f 666 ));
+DATA(insert ( 2003 0 1 f 664 ));
+DATA(insert ( 2003 0 2 f 665 ));
+DATA(insert ( 2003 0 3 f 98 ));
+DATA(insert ( 2003 0 4 f 667 ));
+DATA(insert ( 2003 0 5 f 666 ));
/*
* btree bytea_ops
*/
-DATA(insert ( 428 0 1 f 1957 ));
-DATA(insert ( 428 0 2 f 1958 ));
-DATA(insert ( 428 0 3 f 1955 ));
-DATA(insert ( 428 0 4 f 1960 ));
-DATA(insert ( 428 0 5 f 1959 ));
+DATA(insert ( 428 0 1 f 1957 ));
+DATA(insert ( 428 0 2 f 1958 ));
+DATA(insert ( 428 0 3 f 1955 ));
+DATA(insert ( 428 0 4 f 1960 ));
+DATA(insert ( 428 0 5 f 1959 ));
/*
* btree abstime_ops
*/
-DATA(insert ( 421 0 1 f 562 ));
-DATA(insert ( 421 0 2 f 564 ));
-DATA(insert ( 421 0 3 f 560 ));
-DATA(insert ( 421 0 4 f 565 ));
-DATA(insert ( 421 0 5 f 563 ));
+DATA(insert ( 421 0 1 f 562 ));
+DATA(insert ( 421 0 2 f 564 ));
+DATA(insert ( 421 0 3 f 560 ));
+DATA(insert ( 421 0 4 f 565 ));
+DATA(insert ( 421 0 5 f 563 ));
/*
* btree date_ops
*/
-DATA(insert ( 434 0 1 f 1095 ));
-DATA(insert ( 434 0 2 f 1096 ));
-DATA(insert ( 434 0 3 f 1093 ));
-DATA(insert ( 434 0 4 f 1098 ));
-DATA(insert ( 434 0 5 f 1097 ));
+DATA(insert ( 434 0 1 f 1095 ));
+DATA(insert ( 434 0 2 f 1096 ));
+DATA(insert ( 434 0 3 f 1093 ));
+DATA(insert ( 434 0 4 f 1098 ));
+DATA(insert ( 434 0 5 f 1097 ));
/* crosstype operators vs timestamp */
DATA(insert ( 434 1114 1 f 2345 ));
DATA(insert ( 434 1114 2 f 2346 ));
* btree time_ops
*/
-DATA(insert ( 1996 0 1 f 1110 ));
-DATA(insert ( 1996 0 2 f 1111 ));
-DATA(insert ( 1996 0 3 f 1108 ));
-DATA(insert ( 1996 0 4 f 1113 ));
-DATA(insert ( 1996 0 5 f 1112 ));
+DATA(insert ( 1996 0 1 f 1110 ));
+DATA(insert ( 1996 0 2 f 1111 ));
+DATA(insert ( 1996 0 3 f 1108 ));
+DATA(insert ( 1996 0 4 f 1113 ));
+DATA(insert ( 1996 0 5 f 1112 ));
/*
* btree timetz_ops
*/
-DATA(insert ( 2000 0 1 f 1552 ));
-DATA(insert ( 2000 0 2 f 1553 ));
-DATA(insert ( 2000 0 3 f 1550 ));
-DATA(insert ( 2000 0 4 f 1555 ));
-DATA(insert ( 2000 0 5 f 1554 ));
+DATA(insert ( 2000 0 1 f 1552 ));
+DATA(insert ( 2000 0 2 f 1553 ));
+DATA(insert ( 2000 0 3 f 1550 ));
+DATA(insert ( 2000 0 4 f 1555 ));
+DATA(insert ( 2000 0 5 f 1554 ));
/*
* btree timestamp_ops
*/
-DATA(insert ( 2039 0 1 f 2062 ));
-DATA(insert ( 2039 0 2 f 2063 ));
-DATA(insert ( 2039 0 3 f 2060 ));
-DATA(insert ( 2039 0 4 f 2065 ));
-DATA(insert ( 2039 0 5 f 2064 ));
+DATA(insert ( 2039 0 1 f 2062 ));
+DATA(insert ( 2039 0 2 f 2063 ));
+DATA(insert ( 2039 0 3 f 2060 ));
+DATA(insert ( 2039 0 4 f 2065 ));
+DATA(insert ( 2039 0 5 f 2064 ));
/* crosstype operators vs date */
DATA(insert ( 2039 1082 1 f 2371 ));
DATA(insert ( 2039 1082 2 f 2372 ));
* btree timestamptz_ops
*/
-DATA(insert ( 1998 0 1 f 1322 ));
-DATA(insert ( 1998 0 2 f 1323 ));
-DATA(insert ( 1998 0 3 f 1320 ));
-DATA(insert ( 1998 0 4 f 1325 ));
-DATA(insert ( 1998 0 5 f 1324 ));
+DATA(insert ( 1998 0 1 f 1322 ));
+DATA(insert ( 1998 0 2 f 1323 ));
+DATA(insert ( 1998 0 3 f 1320 ));
+DATA(insert ( 1998 0 4 f 1325 ));
+DATA(insert ( 1998 0 5 f 1324 ));
/* crosstype operators vs date */
DATA(insert ( 1998 1082 1 f 2384 ));
DATA(insert ( 1998 1082 2 f 2385 ));
* btree interval_ops
*/
-DATA(insert ( 1982 0 1 f 1332 ));
-DATA(insert ( 1982 0 2 f 1333 ));
-DATA(insert ( 1982 0 3 f 1330 ));
-DATA(insert ( 1982 0 4 f 1335 ));
-DATA(insert ( 1982 0 5 f 1334 ));
+DATA(insert ( 1982 0 1 f 1332 ));
+DATA(insert ( 1982 0 2 f 1333 ));
+DATA(insert ( 1982 0 3 f 1330 ));
+DATA(insert ( 1982 0 4 f 1335 ));
+DATA(insert ( 1982 0 5 f 1334 ));
/*
* btree macaddr
*/
-DATA(insert ( 1984 0 1 f 1222 ));
-DATA(insert ( 1984 0 2 f 1223 ));
-DATA(insert ( 1984 0 3 f 1220 ));
-DATA(insert ( 1984 0 4 f 1225 ));
-DATA(insert ( 1984 0 5 f 1224 ));
+DATA(insert ( 1984 0 1 f 1222 ));
+DATA(insert ( 1984 0 2 f 1223 ));
+DATA(insert ( 1984 0 3 f 1220 ));
+DATA(insert ( 1984 0 4 f 1225 ));
+DATA(insert ( 1984 0 5 f 1224 ));
/*
* btree inet
*/
-DATA(insert ( 1974 0 1 f 1203 ));
-DATA(insert ( 1974 0 2 f 1204 ));
-DATA(insert ( 1974 0 3 f 1201 ));
-DATA(insert ( 1974 0 4 f 1206 ));
-DATA(insert ( 1974 0 5 f 1205 ));
+DATA(insert ( 1974 0 1 f 1203 ));
+DATA(insert ( 1974 0 2 f 1204 ));
+DATA(insert ( 1974 0 3 f 1201 ));
+DATA(insert ( 1974 0 4 f 1206 ));
+DATA(insert ( 1974 0 5 f 1205 ));
/*
* btree cidr
*/
-DATA(insert ( 432 0 1 f 822 ));
-DATA(insert ( 432 0 2 f 823 ));
-DATA(insert ( 432 0 3 f 820 ));
-DATA(insert ( 432 0 4 f 825 ));
-DATA(insert ( 432 0 5 f 824 ));
+DATA(insert ( 432 0 1 f 822 ));
+DATA(insert ( 432 0 2 f 823 ));
+DATA(insert ( 432 0 3 f 820 ));
+DATA(insert ( 432 0 4 f 825 ));
+DATA(insert ( 432 0 5 f 824 ));
/*
* btree numeric
*/
-DATA(insert ( 1988 0 1 f 1754 ));
-DATA(insert ( 1988 0 2 f 1755 ));
-DATA(insert ( 1988 0 3 f 1752 ));
-DATA(insert ( 1988 0 4 f 1757 ));
-DATA(insert ( 1988 0 5 f 1756 ));
+DATA(insert ( 1988 0 1 f 1754 ));
+DATA(insert ( 1988 0 2 f 1755 ));
+DATA(insert ( 1988 0 3 f 1752 ));
+DATA(insert ( 1988 0 4 f 1757 ));
+DATA(insert ( 1988 0 5 f 1756 ));
/*
* btree bool
*/
-DATA(insert ( 424 0 1 f 58 ));
-DATA(insert ( 424 0 2 f 1694 ));
-DATA(insert ( 424 0 3 f 91 ));
-DATA(insert ( 424 0 4 f 1695 ));
-DATA(insert ( 424 0 5 f 59 ));
+DATA(insert ( 424 0 1 f 58 ));
+DATA(insert ( 424 0 2 f 1694 ));
+DATA(insert ( 424 0 3 f 91 ));
+DATA(insert ( 424 0 4 f 1695 ));
+DATA(insert ( 424 0 5 f 59 ));
/*
* btree bit
*/
-DATA(insert ( 423 0 1 f 1786 ));
-DATA(insert ( 423 0 2 f 1788 ));
-DATA(insert ( 423 0 3 f 1784 ));
-DATA(insert ( 423 0 4 f 1789 ));
-DATA(insert ( 423 0 5 f 1787 ));
+DATA(insert ( 423 0 1 f 1786 ));
+DATA(insert ( 423 0 2 f 1788 ));
+DATA(insert ( 423 0 3 f 1784 ));
+DATA(insert ( 423 0 4 f 1789 ));
+DATA(insert ( 423 0 5 f 1787 ));
/*
* btree varbit
*/
-DATA(insert ( 2002 0 1 f 1806 ));
-DATA(insert ( 2002 0 2 f 1808 ));
-DATA(insert ( 2002 0 3 f 1804 ));
-DATA(insert ( 2002 0 4 f 1809 ));
-DATA(insert ( 2002 0 5 f 1807 ));
+DATA(insert ( 2002 0 1 f 1806 ));
+DATA(insert ( 2002 0 2 f 1808 ));
+DATA(insert ( 2002 0 3 f 1804 ));
+DATA(insert ( 2002 0 4 f 1809 ));
+DATA(insert ( 2002 0 5 f 1807 ));
/*
* btree text pattern
*/
-DATA(insert ( 2095 0 1 f 2314 ));
-DATA(insert ( 2095 0 2 f 2315 ));
-DATA(insert ( 2095 0 3 f 2316 ));
-DATA(insert ( 2095 0 4 f 2317 ));
-DATA(insert ( 2095 0 5 f 2318 ));
+DATA(insert ( 2095 0 1 f 2314 ));
+DATA(insert ( 2095 0 2 f 2315 ));
+DATA(insert ( 2095 0 3 f 2316 ));
+DATA(insert ( 2095 0 4 f 2317 ));
+DATA(insert ( 2095 0 5 f 2318 ));
/*
* btree varchar pattern (same operators as text)
*/
-DATA(insert ( 2096 0 1 f 2314 ));
-DATA(insert ( 2096 0 2 f 2315 ));
-DATA(insert ( 2096 0 3 f 2316 ));
-DATA(insert ( 2096 0 4 f 2317 ));
-DATA(insert ( 2096 0 5 f 2318 ));
+DATA(insert ( 2096 0 1 f 2314 ));
+DATA(insert ( 2096 0 2 f 2315 ));
+DATA(insert ( 2096 0 3 f 2316 ));
+DATA(insert ( 2096 0 4 f 2317 ));
+DATA(insert ( 2096 0 5 f 2318 ));
/*
* btree bpchar pattern
*/
-DATA(insert ( 2097 0 1 f 2326 ));
-DATA(insert ( 2097 0 2 f 2327 ));
-DATA(insert ( 2097 0 3 f 2328 ));
-DATA(insert ( 2097 0 4 f 2329 ));
-DATA(insert ( 2097 0 5 f 2330 ));
+DATA(insert ( 2097 0 1 f 2326 ));
+DATA(insert ( 2097 0 2 f 2327 ));
+DATA(insert ( 2097 0 3 f 2328 ));
+DATA(insert ( 2097 0 4 f 2329 ));
+DATA(insert ( 2097 0 5 f 2330 ));
/*
* btree name pattern
*/
-DATA(insert ( 2098 0 1 f 2332 ));
-DATA(insert ( 2098 0 2 f 2333 ));
-DATA(insert ( 2098 0 3 f 2334 ));
-DATA(insert ( 2098 0 4 f 2335 ));
-DATA(insert ( 2098 0 5 f 2336 ));
+DATA(insert ( 2098 0 1 f 2332 ));
+DATA(insert ( 2098 0 2 f 2333 ));
+DATA(insert ( 2098 0 3 f 2334 ));
+DATA(insert ( 2098 0 4 f 2335 ));
+DATA(insert ( 2098 0 5 f 2336 ));
/*
* btree money_ops
*/
-DATA(insert ( 2099 0 1 f 902 ));
-DATA(insert ( 2099 0 2 f 904 ));
-DATA(insert ( 2099 0 3 f 900 ));
-DATA(insert ( 2099 0 4 f 905 ));
-DATA(insert ( 2099 0 5 f 903 ));
+DATA(insert ( 2099 0 1 f 902 ));
+DATA(insert ( 2099 0 2 f 904 ));
+DATA(insert ( 2099 0 3 f 900 ));
+DATA(insert ( 2099 0 4 f 905 ));
+DATA(insert ( 2099 0 5 f 903 ));
/*
* btree reltime_ops
*/
-DATA(insert ( 2233 0 1 f 568 ));
-DATA(insert ( 2233 0 2 f 570 ));
-DATA(insert ( 2233 0 3 f 566 ));
-DATA(insert ( 2233 0 4 f 571 ));
-DATA(insert ( 2233 0 5 f 569 ));
+DATA(insert ( 2233 0 1 f 568 ));
+DATA(insert ( 2233 0 2 f 570 ));
+DATA(insert ( 2233 0 3 f 566 ));
+DATA(insert ( 2233 0 4 f 571 ));
+DATA(insert ( 2233 0 5 f 569 ));
/*
* btree tinterval_ops
*/
-DATA(insert ( 2234 0 1 f 813 ));
-DATA(insert ( 2234 0 2 f 815 ));
-DATA(insert ( 2234 0 3 f 811 ));
-DATA(insert ( 2234 0 4 f 816 ));
-DATA(insert ( 2234 0 5 f 814 ));
+DATA(insert ( 2234 0 1 f 813 ));
+DATA(insert ( 2234 0 2 f 815 ));
+DATA(insert ( 2234 0 3 f 811 ));
+DATA(insert ( 2234 0 4 f 816 ));
+DATA(insert ( 2234 0 5 f 814 ));
/*
* btree array_ops
*/
-DATA(insert ( 397 0 1 f 1072 ));
-DATA(insert ( 397 0 2 f 1074 ));
-DATA(insert ( 397 0 3 f 1070 ));
-DATA(insert ( 397 0 4 f 1075 ));
-DATA(insert ( 397 0 5 f 1073 ));
+DATA(insert ( 397 0 1 f 1072 ));
+DATA(insert ( 397 0 2 f 1074 ));
+DATA(insert ( 397 0 3 f 1070 ));
+DATA(insert ( 397 0 4 f 1075 ));
+DATA(insert ( 397 0 5 f 1073 ));
/*
* hash index _ops
*/
/* bpchar_ops */
-DATA(insert ( 427 0 1 f 1054 ));
+DATA(insert ( 427 0 1 f 1054 ));
/* char_ops */
-DATA(insert ( 431 0 1 f 92 ));
+DATA(insert ( 431 0 1 f 92 ));
/* cidr_ops */
-DATA(insert ( 433 0 1 f 820 ));
+DATA(insert ( 433 0 1 f 820 ));
/* date_ops */
-DATA(insert ( 435 0 1 f 1093 ));
+DATA(insert ( 435 0 1 f 1093 ));
/* float4_ops */
-DATA(insert ( 1971 0 1 f 620 ));
+DATA(insert ( 1971 0 1 f 620 ));
/* float8_ops */
-DATA(insert ( 1973 0 1 f 670 ));
+DATA(insert ( 1973 0 1 f 670 ));
/* inet_ops */
-DATA(insert ( 1975 0 1 f 1201 ));
+DATA(insert ( 1975 0 1 f 1201 ));
/* int2_ops */
-DATA(insert ( 1977 0 1 f 94 ));
+DATA(insert ( 1977 0 1 f 94 ));
/* int4_ops */
-DATA(insert ( 1979 0 1 f 96 ));
+DATA(insert ( 1979 0 1 f 96 ));
/* int8_ops */
-DATA(insert ( 1981 0 1 f 410 ));
+DATA(insert ( 1981 0 1 f 410 ));
/* interval_ops */
-DATA(insert ( 1983 0 1 f 1330 ));
+DATA(insert ( 1983 0 1 f 1330 ));
/* macaddr_ops */
-DATA(insert ( 1985 0 1 f 1220 ));
+DATA(insert ( 1985 0 1 f 1220 ));
/* name_ops */
-DATA(insert ( 1987 0 1 f 93 ));
+DATA(insert ( 1987 0 1 f 93 ));
/* oid_ops */
-DATA(insert ( 1990 0 1 f 607 ));
+DATA(insert ( 1990 0 1 f 607 ));
/* oidvector_ops */
-DATA(insert ( 1992 0 1 f 649 ));
+DATA(insert ( 1992 0 1 f 649 ));
/* text_ops */
-DATA(insert ( 1995 0 1 f 98 ));
+DATA(insert ( 1995 0 1 f 98 ));
/* time_ops */
-DATA(insert ( 1997 0 1 f 1108 ));
+DATA(insert ( 1997 0 1 f 1108 ));
/* timestamptz_ops */
-DATA(insert ( 1999 0 1 f 1320 ));
+DATA(insert ( 1999 0 1 f 1320 ));
/* timetz_ops */
-DATA(insert ( 2001 0 1 f 1550 ));
+DATA(insert ( 2001 0 1 f 1550 ));
/* varchar_ops */
-DATA(insert ( 2004 0 1 f 98 ));
+DATA(insert ( 2004 0 1 f 98 ));
/* timestamp_ops */
-DATA(insert ( 2040 0 1 f 2060 ));
+DATA(insert ( 2040 0 1 f 2060 ));
/* bool_ops */
-DATA(insert ( 2222 0 1 f 91 ));
+DATA(insert ( 2222 0 1 f 91 ));
/* bytea_ops */
-DATA(insert ( 2223 0 1 f 1955 ));
+DATA(insert ( 2223 0 1 f 1955 ));
/* int2vector_ops */
-DATA(insert ( 2224 0 1 f 386 ));
+DATA(insert ( 2224 0 1 f 386 ));
/* xid_ops */
-DATA(insert ( 2225 0 1 f 352 ));
+DATA(insert ( 2225 0 1 f 352 ));
/* cid_ops */
-DATA(insert ( 2226 0 1 f 385 ));
+DATA(insert ( 2226 0 1 f 385 ));
/* abstime_ops */
-DATA(insert ( 2227 0 1 f 560 ));
+DATA(insert ( 2227 0 1 f 560 ));
/* reltime_ops */
-DATA(insert ( 2228 0 1 f 566 ));
+DATA(insert ( 2228 0 1 f 566 ));
/* text_pattern_ops */
-DATA(insert ( 2229 0 1 f 2316 ));
+DATA(insert ( 2229 0 1 f 2316 ));
/* varchar_pattern_ops */
-DATA(insert ( 2230 0 1 f 2316 ));
+DATA(insert ( 2230 0 1 f 2316 ));
/* bpchar_pattern_ops */
-DATA(insert ( 2231 0 1 f 2328 ));
+DATA(insert ( 2231 0 1 f 2328 ));
/* name_pattern_ops */
-DATA(insert ( 2232 0 1 f 2334 ));
+DATA(insert ( 2232 0 1 f 2334 ));
/* aclitem_ops */
-DATA(insert ( 2235 0 1 f 974 ));
+DATA(insert ( 2235 0 1 f 974 ));
#endif /* PG_AMOP_H */
* The primary key for this table is <amopclaid, amprocsubtype, amprocnum>.
* amprocsubtype is equal to zero for an opclass's "default" procedures.
* Usually a nondefault amprocsubtype indicates a support procedure to be
- * used with operators having the same nondefault amopsubtype. The exact
+ * used with operators having the same nondefault amopsubtype. The exact
* behavior depends on the index AM, however, and some don't pay attention
* to subtype at all.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.50 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_amproc.h,v 1.51 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
*/
/* rtree */
-DATA(insert ( 425 0 1 193 ));
-DATA(insert ( 425 0 2 194 ));
-DATA(insert ( 425 0 3 195 ));
-DATA(insert ( 1993 0 1 197 ));
-DATA(insert ( 1993 0 2 198 ));
-DATA(insert ( 1993 0 3 199 ));
+DATA(insert ( 425 0 1 193 ));
+DATA(insert ( 425 0 2 194 ));
+DATA(insert ( 425 0 3 195 ));
+DATA(insert ( 1993 0 1 197 ));
+DATA(insert ( 1993 0 2 198 ));
+DATA(insert ( 1993 0 3 199 ));
/* btree */
-DATA(insert ( 397 0 1 382 ));
-DATA(insert ( 421 0 1 357 ));
-DATA(insert ( 423 0 1 1596 ));
-DATA(insert ( 424 0 1 1693 ));
-DATA(insert ( 426 0 1 1078 ));
-DATA(insert ( 428 0 1 1954 ));
-DATA(insert ( 429 0 1 358 ));
-DATA(insert ( 432 0 1 926 ));
-DATA(insert ( 434 0 1 1092 ));
+DATA(insert ( 397 0 1 382 ));
+DATA(insert ( 421 0 1 357 ));
+DATA(insert ( 423 0 1 1596 ));
+DATA(insert ( 424 0 1 1693 ));
+DATA(insert ( 426 0 1 1078 ));
+DATA(insert ( 428 0 1 1954 ));
+DATA(insert ( 429 0 1 358 ));
+DATA(insert ( 432 0 1 926 ));
+DATA(insert ( 434 0 1 1092 ));
DATA(insert ( 434 1114 1 2344 ));
DATA(insert ( 434 1184 1 2357 ));
-DATA(insert ( 1970 0 1 354 ));
-DATA(insert ( 1970 701 1 2194 ));
-DATA(insert ( 1972 0 1 355 ));
-DATA(insert ( 1972 700 1 2195 ));
-DATA(insert ( 1974 0 1 926 ));
-DATA(insert ( 1976 0 1 350 ));
-DATA(insert ( 1976 23 1 2190 ));
-DATA(insert ( 1976 20 1 2192 ));
-DATA(insert ( 1978 0 1 351 ));
-DATA(insert ( 1978 20 1 2188 ));
-DATA(insert ( 1978 21 1 2191 ));
-DATA(insert ( 1980 0 1 842 ));
-DATA(insert ( 1980 23 1 2189 ));
-DATA(insert ( 1980 21 1 2193 ));
-DATA(insert ( 1982 0 1 1315 ));
-DATA(insert ( 1984 0 1 836 ));
-DATA(insert ( 1986 0 1 359 ));
-DATA(insert ( 1988 0 1 1769 ));
-DATA(insert ( 1989 0 1 356 ));
-DATA(insert ( 1991 0 1 404 ));
-DATA(insert ( 1994 0 1 360 ));
-DATA(insert ( 1996 0 1 1107 ));
-DATA(insert ( 1998 0 1 1314 ));
+DATA(insert ( 1970 0 1 354 ));
+DATA(insert ( 1970 701 1 2194 ));
+DATA(insert ( 1972 0 1 355 ));
+DATA(insert ( 1972 700 1 2195 ));
+DATA(insert ( 1974 0 1 926 ));
+DATA(insert ( 1976 0 1 350 ));
+DATA(insert ( 1976 23 1 2190 ));
+DATA(insert ( 1976 20 1 2192 ));
+DATA(insert ( 1978 0 1 351 ));
+DATA(insert ( 1978 20 1 2188 ));
+DATA(insert ( 1978 21 1 2191 ));
+DATA(insert ( 1980 0 1 842 ));
+DATA(insert ( 1980 23 1 2189 ));
+DATA(insert ( 1980 21 1 2193 ));
+DATA(insert ( 1982 0 1 1315 ));
+DATA(insert ( 1984 0 1 836 ));
+DATA(insert ( 1986 0 1 359 ));
+DATA(insert ( 1988 0 1 1769 ));
+DATA(insert ( 1989 0 1 356 ));
+DATA(insert ( 1991 0 1 404 ));
+DATA(insert ( 1994 0 1 360 ));
+DATA(insert ( 1996 0 1 1107 ));
+DATA(insert ( 1998 0 1 1314 ));
DATA(insert ( 1998 1082 1 2383 ));
DATA(insert ( 1998 1114 1 2533 ));
-DATA(insert ( 2000 0 1 1358 ));
-DATA(insert ( 2002 0 1 1672 ));
-DATA(insert ( 2003 0 1 360 ));
-DATA(insert ( 2039 0 1 2045 ));
+DATA(insert ( 2000 0 1 1358 ));
+DATA(insert ( 2002 0 1 1672 ));
+DATA(insert ( 2003 0 1 360 ));
+DATA(insert ( 2039 0 1 2045 ));
DATA(insert ( 2039 1082 1 2370 ));
DATA(insert ( 2039 1184 1 2526 ));
-DATA(insert ( 2095 0 1 2166 ));
-DATA(insert ( 2096 0 1 2166 ));
-DATA(insert ( 2097 0 1 2180 ));
-DATA(insert ( 2098 0 1 2187 ));
-DATA(insert ( 2099 0 1 377 ));
-DATA(insert ( 2233 0 1 380 ));
-DATA(insert ( 2234 0 1 381 ));
+DATA(insert ( 2095 0 1 2166 ));
+DATA(insert ( 2096 0 1 2166 ));
+DATA(insert ( 2097 0 1 2180 ));
+DATA(insert ( 2098 0 1 2187 ));
+DATA(insert ( 2099 0 1 377 ));
+DATA(insert ( 2233 0 1 380 ));
+DATA(insert ( 2234 0 1 381 ));
/* hash */
-DATA(insert ( 427 0 1 1080 ));
-DATA(insert ( 431 0 1 454 ));
-DATA(insert ( 433 0 1 422 ));
-DATA(insert ( 435 0 1 450 ));
-DATA(insert ( 1971 0 1 451 ));
-DATA(insert ( 1973 0 1 452 ));
-DATA(insert ( 1975 0 1 422 ));
-DATA(insert ( 1977 0 1 449 ));
-DATA(insert ( 1979 0 1 450 ));
-DATA(insert ( 1981 0 1 949 ));
-DATA(insert ( 1983 0 1 1697 ));
-DATA(insert ( 1985 0 1 399 ));
-DATA(insert ( 1987 0 1 455 ));
-DATA(insert ( 1990 0 1 453 ));
-DATA(insert ( 1992 0 1 457 ));
-DATA(insert ( 1995 0 1 400 ));
-DATA(insert ( 1997 0 1 452 ));
-DATA(insert ( 1999 0 1 452 ));
-DATA(insert ( 2001 0 1 1696 ));
-DATA(insert ( 2004 0 1 400 ));
-DATA(insert ( 2040 0 1 452 ));
-DATA(insert ( 2222 0 1 454 ));
-DATA(insert ( 2223 0 1 456 ));
-DATA(insert ( 2224 0 1 398 ));
-DATA(insert ( 2225 0 1 450 ));
-DATA(insert ( 2226 0 1 450 ));
-DATA(insert ( 2227 0 1 450 ));
-DATA(insert ( 2228 0 1 450 ));
-DATA(insert ( 2229 0 1 456 ));
-DATA(insert ( 2230 0 1 456 ));
-DATA(insert ( 2231 0 1 456 ));
-DATA(insert ( 2232 0 1 455 ));
-DATA(insert ( 2235 0 1 329 ));
+DATA(insert ( 427 0 1 1080 ));
+DATA(insert ( 431 0 1 454 ));
+DATA(insert ( 433 0 1 422 ));
+DATA(insert ( 435 0 1 450 ));
+DATA(insert ( 1971 0 1 451 ));
+DATA(insert ( 1973 0 1 452 ));
+DATA(insert ( 1975 0 1 422 ));
+DATA(insert ( 1977 0 1 449 ));
+DATA(insert ( 1979 0 1 450 ));
+DATA(insert ( 1981 0 1 949 ));
+DATA(insert ( 1983 0 1 1697 ));
+DATA(insert ( 1985 0 1 399 ));
+DATA(insert ( 1987 0 1 455 ));
+DATA(insert ( 1990 0 1 453 ));
+DATA(insert ( 1992 0 1 457 ));
+DATA(insert ( 1995 0 1 400 ));
+DATA(insert ( 1997 0 1 452 ));
+DATA(insert ( 1999 0 1 452 ));
+DATA(insert ( 2001 0 1 1696 ));
+DATA(insert ( 2004 0 1 400 ));
+DATA(insert ( 2040 0 1 452 ));
+DATA(insert ( 2222 0 1 454 ));
+DATA(insert ( 2223 0 1 456 ));
+DATA(insert ( 2224 0 1 398 ));
+DATA(insert ( 2225 0 1 450 ));
+DATA(insert ( 2226 0 1 450 ));
+DATA(insert ( 2227 0 1 450 ));
+DATA(insert ( 2228 0 1 450 ));
+DATA(insert ( 2229 0 1 456 ));
+DATA(insert ( 2230 0 1 456 ));
+DATA(insert ( 2231 0 1 456 ));
+DATA(insert ( 2232 0 1 455 ));
+DATA(insert ( 2235 0 1 329 ));
#endif /* PG_AMPROC_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.111 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_attribute.h,v 1.112 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
{ 1247, {"typoutput"}, 24, -1, 4, 12, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1247, {"typreceive"}, 24, -1, 4, 13, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1247, {"typsend"}, 24, -1, 4, 14, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
-{ 1247, {"typanalyze"}, 24, -1, 4, 15, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
+{ 1247, {"typanalyze"}, 24, -1, 4, 15, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1247, {"typalign"}, 18, -1, 1, 16, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
{ 1247, {"typstorage"}, 18, -1, 1, 17, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
{ 1247, {"typnotnull"}, 16, -1, 1, 18, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
* pg_database
* ----------------
*/
-DATA(insert ( 1262 datname 19 -1 NAMEDATALEN 1 0 -1 -1 f p i t f f t 0));
+DATA(insert ( 1262 datname 19 -1 NAMEDATALEN 1 0 -1 -1 f p i t f f t 0));
DATA(insert ( 1262 datdba 23 -1 4 2 0 -1 -1 t p i t f f t 0));
DATA(insert ( 1262 encoding 23 -1 4 3 0 -1 -1 t p i t f f t 0));
DATA(insert ( 1262 datistemplate 16 -1 1 4 0 -1 -1 t p c t f f t 0));
#define Schema_pg_proc \
{ 1255, {"proname"}, 19, -1, NAMEDATALEN, 1, 0, -1, -1, false, 'p', 'i', true, false, false, true, 0 }, \
{ 1255, {"pronamespace"}, 26, -1, 4, 2, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
-{ 1255, {"proowner"}, 23, -1, 4, 3, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
-{ 1255, {"prolang"}, 26, -1, 4, 4, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
+{ 1255, {"proowner"}, 23, -1, 4, 3, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
+{ 1255, {"prolang"}, 26, -1, 4, 4, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1255, {"proisagg"}, 16, -1, 1, 5, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1255, {"prosecdef"}, 16, -1, 1, 6, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1255, {"proisstrict"}, 16, -1, 1, 7, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1255, {"proretset"}, 16, -1, 1, 8, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1255, {"provolatile"}, 18, -1, 1, 9, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
-{ 1255, {"pronargs"}, 21, -1, 2, 10, 0, -1, -1, true, 'p', 's', true, false, false, true, 0 }, \
-{ 1255, {"prorettype"}, 26, -1, 4, 11, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
+{ 1255, {"prosecdef"}, 16, -1, 1, 6, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
+{ 1255, {"proisstrict"}, 16, -1, 1, 7, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
+{ 1255, {"proretset"}, 16, -1, 1, 8, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
+{ 1255, {"provolatile"}, 18, -1, 1, 9, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
+{ 1255, {"pronargs"}, 21, -1, 2, 10, 0, -1, -1, true, 'p', 's', true, false, false, true, 0 }, \
+{ 1255, {"prorettype"}, 26, -1, 4, 11, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1255, {"proargtypes"}, 30, -1, INDEX_MAX_KEYS*4, 12, 0, -1, -1, false, 'p', 'i', true, false, false, true, 0 }, \
{ 1255, {"proargnames"}, 1009, -1, -1, 13, 1, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1255, {"prosrc"}, 25, -1, -1, 14, 0, -1, -1, false, 'x', 'i', false, false, false, true, 0 }, \
{ 1259, {"relfilenode"}, 26, -1, 4, 6, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"reltablespace"}, 26, -1, 4, 7, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"relpages"}, 23, -1, 4, 8, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
-{ 1259, {"reltuples"}, 700, -1, 4, 9, 0, -1, -1, false, 'p', 'i', true, false, false, true, 0 }, \
+{ 1259, {"reltuples"}, 700, -1, 4, 9, 0, -1, -1, false, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"reltoastrelid"}, 26, -1, 4, 10, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"reltoastidxid"}, 26, -1, 4, 11, 0, -1, -1, true, 'p', 'i', true, false, false, true, 0 }, \
{ 1259, {"relhasindex"}, 16, -1, 1, 12, 0, -1, -1, true, 'p', 'c', true, false, false, true, 0 }, \
*
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.14 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_cast.h,v 1.15 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* expression */
COERCION_CODE_ASSIGNMENT = 'a', /* coercion in context of
* assignment */
- COERCION_CODE_EXPLICIT = 'e' /* explicit cast operation */
+ COERCION_CODE_EXPLICIT = 'e' /* explicit cast operation */
} CoercionCodes;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.83 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_class.h,v 1.84 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DESCR("");
DATA(insert OID = 1259 ( pg_class PGNSP 83 PGUID 0 1259 0 0 0 0 0 f f r 25 0 0 0 0 0 t f f f _null_ ));
DESCR("");
-DATA(insert OID = 1260 ( pg_shadow PGNSP 86 PGUID 0 1260 1664 0 0 0 0 f t r 8 0 0 0 0 0 f f f f _null_ ));
+DATA(insert OID = 1260 ( pg_shadow PGNSP 86 PGUID 0 1260 1664 0 0 0 0 f t r 8 0 0 0 0 0 f f f f _null_ ));
DESCR("");
-DATA(insert OID = 1261 ( pg_group PGNSP 87 PGUID 0 1261 1664 0 0 0 0 f t r 3 0 0 0 0 0 f f f f _null_ ));
+DATA(insert OID = 1261 ( pg_group PGNSP 87 PGUID 0 1261 1664 0 0 0 0 f t r 3 0 0 0 0 0 f f f f _null_ ));
DESCR("");
DATA(insert OID = 1262 ( pg_database PGNSP 88 PGUID 0 1262 1664 0 0 0 0 f t r 11 0 0 0 0 0 t f f f _null_ ));
DESCR("");
-DATA(insert OID = 1213 ( pg_tablespace PGNSP 90 PGUID 0 1213 1664 0 0 0 0 f t r 4 0 0 0 0 0 t f f f _null_ ));
+DATA(insert OID = 1213 ( pg_tablespace PGNSP 90 PGUID 0 1213 1664 0 0 0 0 f t r 4 0 0 0 0 0 t f f f _null_ ));
DESCR("");
DATA(insert OID = 376 ( pg_xactlock PGNSP 0 PGUID 0 0 1664 0 0 0 0 f t s 1 0 0 0 0 0 f f f f _null_ ));
DESCR("");
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.12 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_constraint.h,v 1.13 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
extern void RemoveConstraintById(Oid conId);
extern bool ConstraintNameIsUsed(ConstraintCategory conCat, Oid objId,
- Oid objNamespace, const char *conname);
+ Oid objNamespace, const char *conname);
extern char *ChooseConstraintName(const char *name1, const char *name2,
- const char *label, Oid namespace,
- List *others);
+ const char *label, Oid namespace,
+ List *others);
#endif /* PG_CONSTRAINT_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.17 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_control.h,v 1.18 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef struct CheckPoint
{
- XLogRecPtr redo; /* next RecPtr available when we
- * began to create CheckPoint
- * (i.e. REDO start point) */
+ XLogRecPtr redo; /* next RecPtr available when we began to
+ * create CheckPoint (i.e. REDO start
+ * point) */
XLogRecPtr undo; /* first record of oldest in-progress
- * transaction when we started
- * (i.e. UNDO end point) */
- TimeLineID ThisTimeLineID; /* current TLI */
+ * transaction when we started (i.e. UNDO
+ * end point) */
+ TimeLineID ThisTimeLineID; /* current TLI */
TransactionId nextXid; /* next free XID */
Oid nextOid; /* next free OID */
time_t time; /* time stamp of checkpoint */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_namespace.h,v 1.13 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_namespace.h,v 1.14 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
#define Natts_pg_namespace 4
#define Anum_pg_namespace_nspname 1
#define Anum_pg_namespace_nspowner 2
-#define Anum_pg_namespace_nsptablespace 3
+#define Anum_pg_namespace_nsptablespace 3
#define Anum_pg_namespace_nspacl 4
/*
* prototypes for functions in pg_namespace.c
*/
-extern Oid NamespaceCreate(const char *nspName, int32 ownerSysId,
- Oid nspTablespace);
+extern Oid NamespaceCreate(const char *nspName, int32 ownerSysId,
+ Oid nspTablespace);
#endif /* PG_NAMESPACE_H */
* such an index.
*
* Normally opckeytype = InvalidOid (zero), indicating that the data stored
- * in the index is the same as the data in the indexed column. If opckeytype
+ * in the index is the same as the data in the indexed column. If opckeytype
* is nonzero then it indicates that a conversion step is needed to produce
* the stored index data, which will be of type opckeytype (which might be
- * the same or different from the input datatype). Performing such a
+ * the same or different from the input datatype). Performing such a
* conversion is the responsibility of the index access method --- not all
* AMs support this.
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_opclass.h,v 1.60 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_opclass.h,v 1.61 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.127 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_operator.h,v 1.128 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DATA(insert OID = 352 ( "=" PGNSP PGUID b t 28 28 16 352 0 0 0 0 0 xideq eqsel eqjoinsel ));
DATA(insert OID = 353 ( "=" PGNSP PGUID b f 28 23 16 0 0 0 0 0 0 xideqint4 eqsel eqjoinsel ));
-DATA(insert OID = 388 ( "!" PGNSP PGUID r f 20 0 1700 0 0 0 0 0 0 numeric_fac - - ));
-DATA(insert OID = 389 ( "!!" PGNSP PGUID l f 0 20 1700 0 0 0 0 0 0 numeric_fac - - ));
+DATA(insert OID = 388 ( "!" PGNSP PGUID r f 20 0 1700 0 0 0 0 0 0 numeric_fac - - ));
+DATA(insert OID = 389 ( "!!" PGNSP PGUID l f 0 20 1700 0 0 0 0 0 0 numeric_fac - - ));
DATA(insert OID = 385 ( "=" PGNSP PGUID b t 29 29 16 385 0 0 0 0 0 cideq eqsel eqjoinsel ));
DATA(insert OID = 386 ( "=" PGNSP PGUID b t 22 22 16 386 0 0 0 0 0 int2vectoreq eqsel eqjoinsel ));
DATA(insert OID = 387 ( "=" PGNSP PGUID b f 27 27 16 387 0 0 0 0 0 tideq eqsel eqjoinsel ));
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.344 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_proc.h,v 1.345 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* The script catalog/genbki.sh reads this file and generates .bki
int2 pronargs; /* number of arguments */
Oid prorettype; /* OID of result type */
oidvector proargtypes; /* OIDs of argument types */
- text proargnames[1]; /* VARIABLE LENGTH FIELD */
+ text proargnames[1]; /* VARIABLE LENGTH FIELD */
text prosrc; /* VARIABLE LENGTH FIELD */
bytea probin; /* VARIABLE LENGTH FIELD */
aclitem proacl[1]; /* VARIABLE LENGTH FIELD */
DESCR("I/O");
DATA(insert OID = 55 ( oidvectorout PGNSP PGUID 12 f f t f i 1 2275 "30" _null_ oidvectorout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 56 ( boollt PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boollt - _null_ ));
+DATA(insert OID = 56 ( boollt PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boollt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 57 ( boolgt PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolgt - _null_ ));
+DATA(insert OID = 57 ( boolgt PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolgt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 60 ( booleq PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ booleq - _null_ ));
+DATA(insert OID = 60 ( booleq PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ booleq - _null_ ));
DESCR("equal");
-DATA(insert OID = 61 ( chareq PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ chareq - _null_ ));
+DATA(insert OID = 61 ( chareq PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ chareq - _null_ ));
DESCR("equal");
-DATA(insert OID = 62 ( nameeq PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ nameeq - _null_ ));
+DATA(insert OID = 62 ( nameeq PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ nameeq - _null_ ));
DESCR("equal");
-DATA(insert OID = 63 ( int2eq PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2eq - _null_ ));
+DATA(insert OID = 63 ( int2eq PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 64 ( int2lt PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2lt - _null_ ));
+DATA(insert OID = 64 ( int2lt PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 65 ( int4eq PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4eq - _null_ ));
+DATA(insert OID = 65 ( int4eq PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 66 ( int4lt PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4lt - _null_ ));
+DATA(insert OID = 66 ( int4lt PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 67 ( texteq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texteq - _null_ ));
+DATA(insert OID = 67 ( texteq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texteq - _null_ ));
DESCR("equal");
-DATA(insert OID = 68 ( xideq PGNSP PGUID 12 f f t f i 2 16 "28 28" _null_ xideq - _null_ ));
+DATA(insert OID = 68 ( xideq PGNSP PGUID 12 f f t f i 2 16 "28 28" _null_ xideq - _null_ ));
DESCR("equal");
-DATA(insert OID = 69 ( cideq PGNSP PGUID 12 f f t f i 2 16 "29 29" _null_ cideq - _null_ ));
+DATA(insert OID = 69 ( cideq PGNSP PGUID 12 f f t f i 2 16 "29 29" _null_ cideq - _null_ ));
DESCR("equal");
-DATA(insert OID = 70 ( charne PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charne - _null_ ));
+DATA(insert OID = 70 ( charne PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1246 ( charlt PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charlt - _null_ ));
+DATA(insert OID = 1246 ( charlt PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charlt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 72 ( charle PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charle - _null_ ));
+DATA(insert OID = 72 ( charle PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 73 ( chargt PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ chargt - _null_ ));
+DATA(insert OID = 73 ( chargt PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ chargt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 74 ( charge PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charge - _null_ ));
+DATA(insert OID = 74 ( charge PGNSP PGUID 12 f f t f i 2 16 "18 18" _null_ charge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1248 ( charpl PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charpl - _null_ ));
+DATA(insert OID = 1248 ( charpl PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charpl - _null_ ));
DESCR("add");
-DATA(insert OID = 1250 ( charmi PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charmi - _null_ ));
+DATA(insert OID = 1250 ( charmi PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charmi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 77 ( charmul PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charmul - _null_ ));
+DATA(insert OID = 77 ( charmul PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ charmul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 78 ( chardiv PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ chardiv - _null_ ));
+DATA(insert OID = 78 ( chardiv PGNSP PGUID 12 f f t f i 2 18 "18 18" _null_ chardiv - _null_ ));
DESCR("divide");
-DATA(insert OID = 79 ( nameregexeq PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameregexeq - _null_ ));
+DATA(insert OID = 79 ( nameregexeq PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameregexeq - _null_ ));
DESCR("matches regex., case-sensitive");
-DATA(insert OID = 1252 ( nameregexne PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameregexne - _null_ ));
+DATA(insert OID = 1252 ( nameregexne PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameregexne - _null_ ));
DESCR("does not match regex., case-sensitive");
-DATA(insert OID = 1254 ( textregexeq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textregexeq - _null_ ));
+DATA(insert OID = 1254 ( textregexeq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textregexeq - _null_ ));
DESCR("matches regex., case-sensitive");
-DATA(insert OID = 1256 ( textregexne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textregexne - _null_ ));
+DATA(insert OID = 1256 ( textregexne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textregexne - _null_ ));
DESCR("does not match regex., case-sensitive");
DATA(insert OID = 1257 ( textlen PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
DESCR("length");
-DATA(insert OID = 1258 ( textcat PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ textcat - _null_ ));
+DATA(insert OID = 1258 ( textcat PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ textcat - _null_ ));
DESCR("concatenate");
-DATA(insert OID = 84 ( boolne PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolne - _null_ ));
+DATA(insert OID = 84 ( boolne PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolne - _null_ ));
DESCR("not equal");
DATA(insert OID = 89 ( version PGNSP PGUID 12 f f t f s 0 25 "" _null_ pgsql_version - _null_ ));
DESCR("PostgreSQL version string");
/* OIDS 100 - 199 */
-DATA(insert OID = 101 ( eqsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ eqsel - _null_ ));
+DATA(insert OID = 101 ( eqsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ eqsel - _null_ ));
DESCR("restriction selectivity of = and related operators");
-DATA(insert OID = 102 ( neqsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ neqsel - _null_ ));
+DATA(insert OID = 102 ( neqsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ neqsel - _null_ ));
DESCR("restriction selectivity of <> and related operators");
-DATA(insert OID = 103 ( scalarltsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ scalarltsel - _null_ ));
+DATA(insert OID = 103 ( scalarltsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ scalarltsel - _null_ ));
DESCR("restriction selectivity of < and related operators on scalar datatypes");
-DATA(insert OID = 104 ( scalargtsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ scalargtsel - _null_ ));
+DATA(insert OID = 104 ( scalargtsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ scalargtsel - _null_ ));
DESCR("restriction selectivity of > and related operators on scalar datatypes");
-DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ eqjoinsel - _null_ ));
+DATA(insert OID = 105 ( eqjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ eqjoinsel - _null_ ));
DESCR("join selectivity of = and related operators");
-DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ neqjoinsel - _null_ ));
+DATA(insert OID = 106 ( neqjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ neqjoinsel - _null_ ));
DESCR("join selectivity of <> and related operators");
-DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ scalarltjoinsel - _null_ ));
+DATA(insert OID = 107 ( scalarltjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ scalarltjoinsel - _null_ ));
DESCR("join selectivity of < and related operators on scalar datatypes");
-DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ scalargtjoinsel - _null_ ));
+DATA(insert OID = 108 ( scalargtjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ scalargtjoinsel - _null_ ));
DESCR("join selectivity of > and related operators on scalar datatypes");
-DATA(insert OID = 109 ( unknownin PGNSP PGUID 12 f f t f i 1 705 "2275" _null_ unknownin - _null_ ));
+DATA(insert OID = 109 ( unknownin PGNSP PGUID 12 f f t f i 1 705 "2275" _null_ unknownin - _null_ ));
DESCR("I/O");
DATA(insert OID = 110 ( unknownout PGNSP PGUID 12 f f t f i 1 2275 "705" _null_ unknownout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ numeric_fac - _null_ ));
+DATA(insert OID = 111 ( numeric_fac PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ numeric_fac - _null_ ));
DATA(insert OID = 112 ( text PGNSP PGUID 12 f f t f i 1 25 "23" _null_ int4_text - _null_ ));
DESCR("convert int4 to text");
DATA(insert OID = 113 ( text PGNSP PGUID 12 f f t f i 1 25 "21" _null_ int2_text - _null_ ));
DATA(insert OID = 114 ( text PGNSP PGUID 12 f f t f i 1 25 "26" _null_ oid_text - _null_ ));
DESCR("convert oid to text");
-DATA(insert OID = 115 ( box_above PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_above - _null_ ));
+DATA(insert OID = 115 ( box_above PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_above - _null_ ));
DESCR("is above");
-DATA(insert OID = 116 ( box_below PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_below - _null_ ));
+DATA(insert OID = 116 ( box_below PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_below - _null_ ));
DESCR("is below");
DATA(insert OID = 117 ( point_in PGNSP PGUID 12 f f t f i 1 600 "2275" _null_ point_in - _null_ ));
DESCR("point inside box?");
DATA(insert OID = 137 ( on_ppath PGNSP PGUID 12 f f t f i 2 16 "600 602" _null_ on_ppath - _null_ ));
DESCR("point within closed path, or point on open path");
-DATA(insert OID = 138 ( box_center PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
+DATA(insert OID = 138 ( box_center PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 139 ( areasel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ areasel - _null_ ));
+DATA(insert OID = 139 ( areasel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ areasel - _null_ ));
DESCR("restriction selectivity for area-comparison operators");
-DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ areajoinsel - _null_ ));
+DATA(insert OID = 140 ( areajoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ areajoinsel - _null_ ));
DESCR("join selectivity for area-comparison operators");
-DATA(insert OID = 141 ( int4mul PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mul - _null_ ));
+DATA(insert OID = 141 ( int4mul PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 144 ( int4ne PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4ne - _null_ ));
+DATA(insert OID = 144 ( int4ne PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 145 ( int2ne PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2ne - _null_ ));
+DATA(insert OID = 145 ( int2ne PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 146 ( int2gt PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2gt - _null_ ));
+DATA(insert OID = 146 ( int2gt PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 147 ( int4gt PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4gt - _null_ ));
+DATA(insert OID = 147 ( int4gt PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 148 ( int2le PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2le - _null_ ));
+DATA(insert OID = 148 ( int2le PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 149 ( int4le PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4le - _null_ ));
+DATA(insert OID = 149 ( int4le PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 150 ( int4ge PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4ge - _null_ ));
+DATA(insert OID = 150 ( int4ge PGNSP PGUID 12 f f t f i 2 16 "23 23" _null_ int4ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 151 ( int2ge PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2ge - _null_ ));
+DATA(insert OID = 151 ( int2ge PGNSP PGUID 12 f f t f i 2 16 "21 21" _null_ int2ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 152 ( int2mul PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mul - _null_ ));
+DATA(insert OID = 152 ( int2mul PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 153 ( int2div PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2div - _null_ ));
+DATA(insert OID = 153 ( int2div PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2div - _null_ ));
DESCR("divide");
-DATA(insert OID = 154 ( int4div PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4div - _null_ ));
+DATA(insert OID = 154 ( int4div PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4div - _null_ ));
DESCR("divide");
-DATA(insert OID = 155 ( int2mod PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mod - _null_ ));
+DATA(insert OID = 155 ( int2mod PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 156 ( int4mod PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mod - _null_ ));
+DATA(insert OID = 156 ( int4mod PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 157 ( textne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textne - _null_ ));
+DATA(insert OID = 157 ( textne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 158 ( int24eq PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24eq - _null_ ));
+DATA(insert OID = 158 ( int24eq PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 159 ( int42eq PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42eq - _null_ ));
+DATA(insert OID = 159 ( int42eq PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 160 ( int24lt PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24lt - _null_ ));
+DATA(insert OID = 160 ( int24lt PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 161 ( int42lt PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42lt - _null_ ));
+DATA(insert OID = 161 ( int42lt PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 162 ( int24gt PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24gt - _null_ ));
+DATA(insert OID = 162 ( int24gt PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 163 ( int42gt PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42gt - _null_ ));
+DATA(insert OID = 163 ( int42gt PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 164 ( int24ne PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24ne - _null_ ));
+DATA(insert OID = 164 ( int24ne PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 165 ( int42ne PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42ne - _null_ ));
+DATA(insert OID = 165 ( int42ne PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 166 ( int24le PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24le - _null_ ));
+DATA(insert OID = 166 ( int24le PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 167 ( int42le PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42le - _null_ ));
+DATA(insert OID = 167 ( int42le PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 168 ( int24ge PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24ge - _null_ ));
+DATA(insert OID = 168 ( int24ge PGNSP PGUID 12 f f t f i 2 16 "21 23" _null_ int24ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 169 ( int42ge PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42ge - _null_ ));
+DATA(insert OID = 169 ( int42ge PGNSP PGUID 12 f f t f i 2 16 "23 21" _null_ int42ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 170 ( int24mul PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mul - _null_ ));
+DATA(insert OID = 170 ( int24mul PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 171 ( int42mul PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mul - _null_ ));
+DATA(insert OID = 171 ( int42mul PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 172 ( int24div PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24div - _null_ ));
+DATA(insert OID = 172 ( int24div PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24div - _null_ ));
DESCR("divide");
-DATA(insert OID = 173 ( int42div PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42div - _null_ ));
+DATA(insert OID = 173 ( int42div PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42div - _null_ ));
DESCR("divide");
-DATA(insert OID = 174 ( int24mod PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mod - _null_ ));
+DATA(insert OID = 174 ( int24mod PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 175 ( int42mod PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mod - _null_ ));
+DATA(insert OID = 175 ( int42mod PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 176 ( int2pl PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2pl - _null_ ));
+DATA(insert OID = 176 ( int2pl PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2pl - _null_ ));
DESCR("add");
-DATA(insert OID = 177 ( int4pl PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4pl - _null_ ));
+DATA(insert OID = 177 ( int4pl PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4pl - _null_ ));
DESCR("add");
-DATA(insert OID = 178 ( int24pl PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24pl - _null_ ));
+DATA(insert OID = 178 ( int24pl PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24pl - _null_ ));
DESCR("add");
-DATA(insert OID = 179 ( int42pl PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42pl - _null_ ));
+DATA(insert OID = 179 ( int42pl PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42pl - _null_ ));
DESCR("add");
-DATA(insert OID = 180 ( int2mi PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mi - _null_ ));
+DATA(insert OID = 180 ( int2mi PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 181 ( int4mi PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mi - _null_ ));
+DATA(insert OID = 181 ( int4mi PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 182 ( int24mi PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mi - _null_ ));
+DATA(insert OID = 182 ( int24mi PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 183 ( int42mi PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mi - _null_ ));
+DATA(insert OID = 183 ( int42mi PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 184 ( oideq PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oideq - _null_ ));
+DATA(insert OID = 184 ( oideq PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oideq - _null_ ));
DESCR("equal");
-DATA(insert OID = 185 ( oidne PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidne - _null_ ));
+DATA(insert OID = 185 ( oidne PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidne - _null_ ));
DESCR("not equal");
DATA(insert OID = 186 ( box_same PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_same - _null_ ));
DESCR("same as?");
DESCR("is right of");
DATA(insert OID = 192 ( box_contained PGNSP PGUID 12 f f t f i 2 16 "603 603" _null_ box_contained - _null_ ));
DESCR("contained in?");
-DATA(insert OID = 193 ( rt_box_union PGNSP PGUID 12 f f t f i 2 603 "603 603" _null_ rt_box_union - _null_ ));
+DATA(insert OID = 193 ( rt_box_union PGNSP PGUID 12 f f t f i 2 603 "603 603" _null_ rt_box_union - _null_ ));
DESCR("r-tree");
DATA(insert OID = 194 ( rt_box_inter PGNSP PGUID 12 f f t f i 2 2278 "603 603" _null_ rt_box_inter - _null_ ));
DESCR("r-tree");
DATA(insert OID = 195 ( rt_box_size PGNSP PGUID 12 f f t f i 2 2278 "603 2281" _null_ rt_box_size - _null_ ));
DESCR("r-tree");
-DATA(insert OID = 197 ( rt_poly_union PGNSP PGUID 12 f f t f i 2 604 "604 604" _null_ rt_poly_union - _null_ ));
+DATA(insert OID = 197 ( rt_poly_union PGNSP PGUID 12 f f t f i 2 604 "604 604" _null_ rt_poly_union - _null_ ));
DESCR("r-tree");
DATA(insert OID = 198 ( rt_poly_inter PGNSP PGUID 12 f f t f i 2 2278 "604 604" _null_ rt_poly_inter - _null_ ));
DESCR("r-tree");
DESCR("I/O");
DATA(insert OID = 201 ( float4out PGNSP PGUID 12 f f t f i 1 2275 "700" _null_ float4out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 202 ( float4mul PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4mul - _null_ ));
+DATA(insert OID = 202 ( float4mul PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 203 ( float4div PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4div - _null_ ));
+DATA(insert OID = 203 ( float4div PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4div - _null_ ));
DESCR("divide");
-DATA(insert OID = 204 ( float4pl PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4pl - _null_ ));
+DATA(insert OID = 204 ( float4pl PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4pl - _null_ ));
DESCR("add");
-DATA(insert OID = 205 ( float4mi PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4mi - _null_ ));
+DATA(insert OID = 205 ( float4mi PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 206 ( float4um PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4um - _null_ ));
+DATA(insert OID = 206 ( float4um PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4um - _null_ ));
DESCR("negate");
-DATA(insert OID = 207 ( float4abs PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4abs - _null_ ));
+DATA(insert OID = 207 ( float4abs PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4abs - _null_ ));
DESCR("absolute value");
DATA(insert OID = 208 ( float4_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 700" _null_ float4_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 209 ( float4larger PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4larger - _null_ ));
+DATA(insert OID = 209 ( float4larger PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 211 ( float4smaller PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4smaller - _null_ ));
+DATA(insert OID = 211 ( float4smaller PGNSP PGUID 12 f f t f i 2 700 "700 700" _null_ float4smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 212 ( int4um PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4um - _null_ ));
DESCR("I/O");
DATA(insert OID = 215 ( float8out PGNSP PGUID 12 f f t f i 1 2275 "701" _null_ float8out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 216 ( float8mul PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8mul - _null_ ));
+DATA(insert OID = 216 ( float8mul PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 217 ( float8div PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8div - _null_ ));
+DATA(insert OID = 217 ( float8div PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8div - _null_ ));
DESCR("divide");
-DATA(insert OID = 218 ( float8pl PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8pl - _null_ ));
+DATA(insert OID = 218 ( float8pl PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8pl - _null_ ));
DESCR("add");
-DATA(insert OID = 219 ( float8mi PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8mi - _null_ ));
+DATA(insert OID = 219 ( float8mi PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 220 ( float8um PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8um - _null_ ));
+DATA(insert OID = 220 ( float8um PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8um - _null_ ));
DESCR("negate");
-DATA(insert OID = 221 ( float8abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8abs - _null_ ));
+DATA(insert OID = 221 ( float8abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8abs - _null_ ));
DESCR("absolute value");
DATA(insert OID = 222 ( float8_accum PGNSP PGUID 12 f f t f i 2 1022 "1022 701" _null_ float8_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 223 ( float8larger PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8larger - _null_ ));
+DATA(insert OID = 223 ( float8larger PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 224 ( float8smaller PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8smaller - _null_ ));
+DATA(insert OID = 224 ( float8smaller PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ float8smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 225 ( lseg_center PGNSP PGUID 12 f f t f i 1 600 "601" _null_ lseg_center - _null_ ));
+DATA(insert OID = 225 ( lseg_center PGNSP PGUID 12 f f t f i 1 600 "601" _null_ lseg_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 226 ( path_center PGNSP PGUID 12 f f t f i 1 600 "602" _null_ path_center - _null_ ));
+DATA(insert OID = 226 ( path_center PGNSP PGUID 12 f f t f i 1 600 "602" _null_ path_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 227 ( poly_center PGNSP PGUID 12 f f t f i 1 600 "604" _null_ poly_center - _null_ ));
+DATA(insert OID = 227 ( poly_center PGNSP PGUID 12 f f t f i 1 600 "604" _null_ poly_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 228 ( dround PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dround - _null_ ));
+DATA(insert OID = 228 ( dround PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dround - _null_ ));
DESCR("round to nearest integer");
-DATA(insert OID = 229 ( dtrunc PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dtrunc - _null_ ));
+DATA(insert OID = 229 ( dtrunc PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dtrunc - _null_ ));
DESCR("truncate to integer");
-DATA(insert OID = 2308 ( ceil PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dceil - _null_ ));
+DATA(insert OID = 2308 ( ceil PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dceil - _null_ ));
DESCR("smallest integer >= value");
-DATA(insert OID = 2320 ( ceiling PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dceil - _null_ ));
+DATA(insert OID = 2320 ( ceiling PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dceil - _null_ ));
DESCR("smallest integer >= value");
-DATA(insert OID = 2309 ( floor PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dfloor - _null_ ));
+DATA(insert OID = 2309 ( floor PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dfloor - _null_ ));
DESCR("largest integer <= value");
-DATA(insert OID = 2310 ( sign PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsign - _null_ ));
+DATA(insert OID = 2310 ( sign PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsign - _null_ ));
DESCR("sign of value");
-DATA(insert OID = 230 ( dsqrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsqrt - _null_ ));
+DATA(insert OID = 230 ( dsqrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsqrt - _null_ ));
DESCR("square root");
-DATA(insert OID = 231 ( dcbrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcbrt - _null_ ));
+DATA(insert OID = 231 ( dcbrt PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcbrt - _null_ ));
DESCR("cube root");
-DATA(insert OID = 232 ( dpow PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ dpow - _null_ ));
+DATA(insert OID = 232 ( dpow PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ dpow - _null_ ));
DESCR("exponentiation (x^y)");
-DATA(insert OID = 233 ( dexp PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dexp - _null_ ));
+DATA(insert OID = 233 ( dexp PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dexp - _null_ ));
DESCR("natural exponential (e^x)");
-DATA(insert OID = 234 ( dlog1 PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dlog1 - _null_ ));
+DATA(insert OID = 234 ( dlog1 PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dlog1 - _null_ ));
DESCR("natural logarithm");
-DATA(insert OID = 235 ( float8 PGNSP PGUID 12 f f t f i 1 701 "21" _null_ i2tod - _null_ ));
+DATA(insert OID = 235 ( float8 PGNSP PGUID 12 f f t f i 1 701 "21" _null_ i2tod - _null_ ));
DESCR("convert int2 to float8");
-DATA(insert OID = 236 ( float4 PGNSP PGUID 12 f f t f i 1 700 "21" _null_ i2tof - _null_ ));
+DATA(insert OID = 236 ( float4 PGNSP PGUID 12 f f t f i 1 700 "21" _null_ i2tof - _null_ ));
DESCR("convert int2 to float4");
-DATA(insert OID = 237 ( int2 PGNSP PGUID 12 f f t f i 1 21 "701" _null_ dtoi2 - _null_ ));
+DATA(insert OID = 237 ( int2 PGNSP PGUID 12 f f t f i 1 21 "701" _null_ dtoi2 - _null_ ));
DESCR("convert float8 to int2");
-DATA(insert OID = 238 ( int2 PGNSP PGUID 12 f f t f i 1 21 "700" _null_ ftoi2 - _null_ ));
+DATA(insert OID = 238 ( int2 PGNSP PGUID 12 f f t f i 1 21 "700" _null_ ftoi2 - _null_ ));
DESCR("convert float4 to int2");
-DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 f f t f i 2 701 "628 628" _null_ line_distance - _null_ ));
+DATA(insert OID = 239 ( line_distance PGNSP PGUID 12 f f t f i 2 701 "628 628" _null_ line_distance - _null_ ));
DESCR("distance between");
DATA(insert OID = 240 ( abstimein PGNSP PGUID 12 f f t f s 1 702 "2275" _null_ abstimein - _null_ ));
DESCR("I/O");
DATA(insert OID = 243 ( reltimeout PGNSP PGUID 12 f f t f s 1 2275 "703" _null_ reltimeout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 244 ( timepl PGNSP PGUID 12 f f t f i 2 702 "702 703" _null_ timepl - _null_ ));
+DATA(insert OID = 244 ( timepl PGNSP PGUID 12 f f t f i 2 702 "702 703" _null_ timepl - _null_ ));
DESCR("add");
-DATA(insert OID = 245 ( timemi PGNSP PGUID 12 f f t f i 2 702 "702 703" _null_ timemi - _null_ ));
+DATA(insert OID = 245 ( timemi PGNSP PGUID 12 f f t f i 2 702 "702 703" _null_ timemi - _null_ ));
DESCR("subtract");
DATA(insert OID = 246 ( tintervalin PGNSP PGUID 12 f f t f s 1 704 "2275" _null_ tintervalin - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 248 ( intinterval PGNSP PGUID 12 f f t f i 2 16 "702 704" _null_ intinterval - _null_ ));
DESCR("abstime in tinterval");
-DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 f f t f i 1 703 "704" _null_ tintervalrel - _null_ ));
+DATA(insert OID = 249 ( tintervalrel PGNSP PGUID 12 f f t f i 1 703 "704" _null_ tintervalrel - _null_ ));
DESCR("tinterval to reltime");
DATA(insert OID = 250 ( timenow PGNSP PGUID 12 f f t f s 0 702 "" _null_ timenow - _null_ ));
DESCR("Current date and time (abstime)");
DESCR("length less-than-or-equal");
DATA(insert OID = 271 ( tintervallenge PGNSP PGUID 12 f f t f i 2 16 "704 703" _null_ tintervallenge - _null_ ));
DESCR("length greater-than-or-equal");
-DATA(insert OID = 272 ( tintervalstart PGNSP PGUID 12 f f t f i 1 702 "704" _null_ tintervalstart - _null_ ));
+DATA(insert OID = 272 ( tintervalstart PGNSP PGUID 12 f f t f i 1 702 "704" _null_ tintervalstart - _null_ ));
DESCR("start of interval");
-DATA(insert OID = 273 ( tintervalend PGNSP PGUID 12 f f t f i 1 702 "704" _null_ tintervalend - _null_ ));
+DATA(insert OID = 273 ( tintervalend PGNSP PGUID 12 f f t f i 1 702 "704" _null_ tintervalend - _null_ ));
DESCR("end of interval");
DATA(insert OID = 274 ( timeofday PGNSP PGUID 12 f f t f v 0 25 "" _null_ timeofday - _null_ ));
DESCR("Current date and time - increments during transactions");
DATA(insert OID = 278 ( inter_lb PGNSP PGUID 12 f f t f i 2 16 "628 603" _null_ inter_lb - _null_ ));
DESCR("intersect?");
-DATA(insert OID = 279 ( float48mul PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48mul - _null_ ));
+DATA(insert OID = 279 ( float48mul PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 280 ( float48div PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48div - _null_ ));
+DATA(insert OID = 280 ( float48div PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48div - _null_ ));
DESCR("divide");
-DATA(insert OID = 281 ( float48pl PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48pl - _null_ ));
+DATA(insert OID = 281 ( float48pl PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48pl - _null_ ));
DESCR("add");
-DATA(insert OID = 282 ( float48mi PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48mi - _null_ ));
+DATA(insert OID = 282 ( float48mi PGNSP PGUID 12 f f t f i 2 701 "700 701" _null_ float48mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 283 ( float84mul PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84mul - _null_ ));
+DATA(insert OID = 283 ( float84mul PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 284 ( float84div PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84div - _null_ ));
+DATA(insert OID = 284 ( float84div PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84div - _null_ ));
DESCR("divide");
-DATA(insert OID = 285 ( float84pl PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84pl - _null_ ));
+DATA(insert OID = 285 ( float84pl PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84pl - _null_ ));
DESCR("add");
-DATA(insert OID = 286 ( float84mi PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84mi - _null_ ));
+DATA(insert OID = 286 ( float84mi PGNSP PGUID 12 f f t f i 2 701 "701 700" _null_ float84mi - _null_ ));
DESCR("subtract");
DATA(insert OID = 287 ( float4eq PGNSP PGUID 12 f f t f i 2 16 "700 700" _null_ float4eq - _null_ ));
DATA(insert OID = 310 ( float84ge PGNSP PGUID 12 f f t f i 2 16 "701 700" _null_ float84ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 311 ( float8 PGNSP PGUID 12 f f t f i 1 701 "700" _null_ ftod - _null_ ));
+DATA(insert OID = 311 ( float8 PGNSP PGUID 12 f f t f i 1 701 "700" _null_ ftod - _null_ ));
DESCR("convert float4 to float8");
-DATA(insert OID = 312 ( float4 PGNSP PGUID 12 f f t f i 1 700 "701" _null_ dtof - _null_ ));
+DATA(insert OID = 312 ( float4 PGNSP PGUID 12 f f t f i 1 700 "701" _null_ dtof - _null_ ));
DESCR("convert float8 to float4");
-DATA(insert OID = 313 ( int4 PGNSP PGUID 12 f f t f i 1 23 "21" _null_ i2toi4 - _null_ ));
+DATA(insert OID = 313 ( int4 PGNSP PGUID 12 f f t f i 1 23 "21" _null_ i2toi4 - _null_ ));
DESCR("convert int2 to int4");
-DATA(insert OID = 314 ( int2 PGNSP PGUID 12 f f t f i 1 21 "23" _null_ i4toi2 - _null_ ));
+DATA(insert OID = 314 ( int2 PGNSP PGUID 12 f f t f i 1 21 "23" _null_ i4toi2 - _null_ ));
DESCR("convert int4 to int2");
DATA(insert OID = 315 ( int2vectoreq PGNSP PGUID 12 f f t f i 2 16 "22 22" _null_ int2vectoreq - _null_ ));
DESCR("equal");
-DATA(insert OID = 316 ( float8 PGNSP PGUID 12 f f t f i 1 701 "23" _null_ i4tod - _null_ ));
+DATA(insert OID = 316 ( float8 PGNSP PGUID 12 f f t f i 1 701 "23" _null_ i4tod - _null_ ));
DESCR("convert int4 to float8");
-DATA(insert OID = 317 ( int4 PGNSP PGUID 12 f f t f i 1 23 "701" _null_ dtoi4 - _null_ ));
+DATA(insert OID = 317 ( int4 PGNSP PGUID 12 f f t f i 1 23 "701" _null_ dtoi4 - _null_ ));
DESCR("convert float8 to int4");
-DATA(insert OID = 318 ( float4 PGNSP PGUID 12 f f t f i 1 700 "23" _null_ i4tof - _null_ ));
+DATA(insert OID = 318 ( float4 PGNSP PGUID 12 f f t f i 1 700 "23" _null_ i4tof - _null_ ));
DESCR("convert int4 to float4");
-DATA(insert OID = 319 ( int4 PGNSP PGUID 12 f f t f i 1 23 "700" _null_ ftoi4 - _null_ ));
+DATA(insert OID = 319 ( int4 PGNSP PGUID 12 f f t f i 1 23 "700" _null_ ftoi4 - _null_ ));
DESCR("convert float4 to int4");
DATA(insert OID = 320 ( rtinsert PGNSP PGUID 12 f f t f v 6 2281 "2281 2281 2281 2281 2281 2281" _null_ rtinsert - _null_ ));
DESCR("r-tree(internal)");
DATA(insert OID = 323 ( rtbuild PGNSP PGUID 12 f f t f v 3 2278 "2281 2281 2281" _null_ rtbuild - _null_ ));
DESCR("r-tree(internal)");
-DATA(insert OID = 324 ( rtbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ rtbeginscan - _null_ ));
+DATA(insert OID = 324 ( rtbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ rtbeginscan - _null_ ));
DESCR("r-tree(internal)");
DATA(insert OID = 325 ( rtendscan PGNSP PGUID 12 f f t f v 1 2278 "2281" _null_ rtendscan - _null_ ));
DESCR("r-tree(internal)");
DESCR("btree(internal)");
DATA(insert OID = 331 ( btinsert PGNSP PGUID 12 f f t f v 6 2281 "2281 2281 2281 2281 2281 2281" _null_ btinsert - _null_ ));
DESCR("btree(internal)");
-DATA(insert OID = 333 ( btbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ btbeginscan - _null_ ));
+DATA(insert OID = 333 ( btbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ btbeginscan - _null_ ));
DESCR("btree(internal)");
DATA(insert OID = 334 ( btrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ btrescan - _null_ ));
DESCR("btree(internal)");
DATA(insert OID = 348 ( poly_out PGNSP PGUID 12 f f t f i 1 2275 "604" _null_ poly_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 350 ( btint2cmp PGNSP PGUID 12 f f t f i 2 23 "21 21" _null_ btint2cmp - _null_ ));
+DATA(insert OID = 350 ( btint2cmp PGNSP PGUID 12 f f t f i 2 23 "21 21" _null_ btint2cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 351 ( btint4cmp PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ btint4cmp - _null_ ));
+DATA(insert OID = 351 ( btint4cmp PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ btint4cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 842 ( btint8cmp PGNSP PGUID 12 f f t f i 2 23 "20 20" _null_ btint8cmp - _null_ ));
+DATA(insert OID = 842 ( btint8cmp PGNSP PGUID 12 f f t f i 2 23 "20 20" _null_ btint8cmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 354 ( btfloat4cmp PGNSP PGUID 12 f f t f i 2 23 "700 700" _null_ btfloat4cmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 355 ( btfloat8cmp PGNSP PGUID 12 f f t f i 2 23 "701 701" _null_ btfloat8cmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 356 ( btoidcmp PGNSP PGUID 12 f f t f i 2 23 "26 26" _null_ btoidcmp - _null_ ));
+DATA(insert OID = 356 ( btoidcmp PGNSP PGUID 12 f f t f i 2 23 "26 26" _null_ btoidcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 404 ( btoidvectorcmp PGNSP PGUID 12 f f t f i 2 23 "30 30" _null_ btoidvectorcmp - _null_ ));
+DATA(insert OID = 404 ( btoidvectorcmp PGNSP PGUID 12 f f t f i 2 23 "30 30" _null_ btoidvectorcmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 357 ( btabstimecmp PGNSP PGUID 12 f f t f i 2 23 "702 702" _null_ btabstimecmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 358 ( btcharcmp PGNSP PGUID 12 f f t f i 2 23 "18 18" _null_ btcharcmp - _null_ ));
+DATA(insert OID = 358 ( btcharcmp PGNSP PGUID 12 f f t f i 2 23 "18 18" _null_ btcharcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 359 ( btnamecmp PGNSP PGUID 12 f f t f i 2 23 "19 19" _null_ btnamecmp - _null_ ));
+DATA(insert OID = 359 ( btnamecmp PGNSP PGUID 12 f f t f i 2 23 "19 19" _null_ btnamecmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 360 ( bttextcmp PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ bttextcmp - _null_ ));
+DATA(insert OID = 360 ( bttextcmp PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ bttextcmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 377 ( cash_cmp PGNSP PGUID 12 f f t f i 2 23 "790 790" _null_ cash_cmp - _null_ ));
DESCR("btree less-equal-greater");
DATA(insert OID = 380 ( btreltimecmp PGNSP PGUID 12 f f t f i 2 23 "703 703" _null_ btreltimecmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 381 ( bttintervalcmp PGNSP PGUID 12 f f t f i 2 23 "704 704" _null_ bttintervalcmp - _null_ ));
+DATA(insert OID = 381 ( bttintervalcmp PGNSP PGUID 12 f f t f i 2 23 "704 704" _null_ bttintervalcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 382 ( btarraycmp PGNSP PGUID 12 f f t f i 2 23 "2277 2277" _null_ btarraycmp - _null_ ));
+DATA(insert OID = 382 ( btarraycmp PGNSP PGUID 12 f f t f i 2 23 "2277 2277" _null_ btarraycmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 361 ( lseg_distance PGNSP PGUID 12 f f t f i 2 701 "601 601" _null_ lseg_distance - _null_ ));
+DATA(insert OID = 361 ( lseg_distance PGNSP PGUID 12 f f t f i 2 701 "601 601" _null_ lseg_distance - _null_ ));
DESCR("distance between");
-DATA(insert OID = 362 ( lseg_interpt PGNSP PGUID 12 f f t f i 2 600 "601 601" _null_ lseg_interpt - _null_ ));
+DATA(insert OID = 362 ( lseg_interpt PGNSP PGUID 12 f f t f i 2 600 "601 601" _null_ lseg_interpt - _null_ ));
DESCR("intersection point");
-DATA(insert OID = 363 ( dist_ps PGNSP PGUID 12 f f t f i 2 701 "600 601" _null_ dist_ps - _null_ ));
+DATA(insert OID = 363 ( dist_ps PGNSP PGUID 12 f f t f i 2 701 "600 601" _null_ dist_ps - _null_ ));
DESCR("distance between");
-DATA(insert OID = 364 ( dist_pb PGNSP PGUID 12 f f t f i 2 701 "600 603" _null_ dist_pb - _null_ ));
+DATA(insert OID = 364 ( dist_pb PGNSP PGUID 12 f f t f i 2 701 "600 603" _null_ dist_pb - _null_ ));
DESCR("distance between point and box");
-DATA(insert OID = 365 ( dist_sb PGNSP PGUID 12 f f t f i 2 701 "601 603" _null_ dist_sb - _null_ ));
+DATA(insert OID = 365 ( dist_sb PGNSP PGUID 12 f f t f i 2 701 "601 603" _null_ dist_sb - _null_ ));
DESCR("distance between segment and box");
-DATA(insert OID = 366 ( close_ps PGNSP PGUID 12 f f t f i 2 600 "600 601" _null_ close_ps - _null_ ));
+DATA(insert OID = 366 ( close_ps PGNSP PGUID 12 f f t f i 2 600 "600 601" _null_ close_ps - _null_ ));
DESCR("closest point on line segment");
-DATA(insert OID = 367 ( close_pb PGNSP PGUID 12 f f t f i 2 600 "600 603" _null_ close_pb - _null_ ));
+DATA(insert OID = 367 ( close_pb PGNSP PGUID 12 f f t f i 2 600 "600 603" _null_ close_pb - _null_ ));
DESCR("closest point on box");
-DATA(insert OID = 368 ( close_sb PGNSP PGUID 12 f f t f i 2 600 "601 603" _null_ close_sb - _null_ ));
+DATA(insert OID = 368 ( close_sb PGNSP PGUID 12 f f t f i 2 600 "601 603" _null_ close_sb - _null_ ));
DESCR("closest point to line segment on box");
DATA(insert OID = 369 ( on_ps PGNSP PGUID 12 f f t f i 2 16 "600 601" _null_ on_ps - _null_ ));
DESCR("point contained in segment?");
-DATA(insert OID = 370 ( path_distance PGNSP PGUID 12 f f t f i 2 701 "602 602" _null_ path_distance - _null_ ));
+DATA(insert OID = 370 ( path_distance PGNSP PGUID 12 f f t f i 2 701 "602 602" _null_ path_distance - _null_ ));
DESCR("distance between paths");
-DATA(insert OID = 371 ( dist_ppath PGNSP PGUID 12 f f t f i 2 701 "600 602" _null_ dist_ppath - _null_ ));
+DATA(insert OID = 371 ( dist_ppath PGNSP PGUID 12 f f t f i 2 701 "600 602" _null_ dist_ppath - _null_ ));
DESCR("distance between point and path");
DATA(insert OID = 372 ( on_sb PGNSP PGUID 12 f f t f i 2 16 "601 603" _null_ on_sb - _null_ ));
DESCR("lseg contained in box?");
/* OIDS 400 - 499 */
-DATA(insert OID = 401 ( text PGNSP PGUID 12 f f t f i 1 25 "1042" _null_ rtrim1 - _null_ ));
+DATA(insert OID = 401 ( text PGNSP PGUID 12 f f t f i 1 25 "1042" _null_ rtrim1 - _null_ ));
DESCR("convert char(n) to text");
DATA(insert OID = 406 ( text PGNSP PGUID 12 f f t f i 1 25 "19" _null_ name_text - _null_ ));
DESCR("convert name to text");
DESCR("convert text to name");
DATA(insert OID = 408 ( bpchar PGNSP PGUID 12 f f t f i 1 1042 "19" _null_ name_bpchar - _null_ ));
DESCR("convert name to char(n)");
-DATA(insert OID = 409 ( name PGNSP PGUID 12 f f t f i 1 19 "1042" _null_ bpchar_name - _null_ ));
+DATA(insert OID = 409 ( name PGNSP PGUID 12 f f t f i 1 19 "1042" _null_ bpchar_name - _null_ ));
DESCR("convert char(n) to name");
DATA(insert OID = 440 ( hashgettuple PGNSP PGUID 12 f f t f v 2 16 "2281 2281" _null_ hashgettuple - _null_ ));
DESCR("hash(internal)");
DATA(insert OID = 441 ( hashinsert PGNSP PGUID 12 f f t f v 6 2281 "2281 2281 2281 2281 2281 2281" _null_ hashinsert - _null_ ));
DESCR("hash(internal)");
-DATA(insert OID = 443 ( hashbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ hashbeginscan - _null_ ));
+DATA(insert OID = 443 ( hashbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ hashbeginscan - _null_ ));
DESCR("hash(internal)");
DATA(insert OID = 444 ( hashrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ hashrescan - _null_ ));
DESCR("hash(internal)");
DESCR("hash any varlena type");
DATA(insert OID = 457 ( hashoidvector PGNSP PGUID 12 f f t f i 1 23 "30" _null_ hashoidvector - _null_ ));
DESCR("hash");
-DATA(insert OID = 329 ( hash_aclitem PGNSP PGUID 12 f f t f i 1 23 "1033" _null_ hash_aclitem - _null_ ));
+DATA(insert OID = 329 ( hash_aclitem PGNSP PGUID 12 f f t f i 1 23 "1033" _null_ hash_aclitem - _null_ ));
DESCR("hash");
DATA(insert OID = 398 ( hashint2vector PGNSP PGUID 12 f f t f i 1 23 "22" _null_ hashint2vector - _null_ ));
DESCR("hash");
DESCR("hash");
DATA(insert OID = 422 ( hashinet PGNSP PGUID 12 f f t f i 1 23 "869" _null_ hashinet - _null_ ));
DESCR("hash");
-DATA(insert OID = 458 ( text_larger PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ text_larger - _null_ ));
+DATA(insert OID = 458 ( text_larger PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ text_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 459 ( text_smaller PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ text_smaller - _null_ ));
+DATA(insert OID = 459 ( text_smaller PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ text_smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 460 ( int8in PGNSP PGUID 12 f f t f i 1 20 "2275" _null_ int8in - _null_ ));
DESCR("I/O");
DATA(insert OID = 462 ( int8um PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8um - _null_ ));
DESCR("negate");
-DATA(insert OID = 463 ( int8pl PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8pl - _null_ ));
+DATA(insert OID = 463 ( int8pl PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8pl - _null_ ));
DESCR("add");
-DATA(insert OID = 464 ( int8mi PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mi - _null_ ));
+DATA(insert OID = 464 ( int8mi PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 465 ( int8mul PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mul - _null_ ));
+DATA(insert OID = 465 ( int8mul PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 466 ( int8div PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8div - _null_ ));
+DATA(insert OID = 466 ( int8div PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8div - _null_ ));
DESCR("divide");
-DATA(insert OID = 467 ( int8eq PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8eq - _null_ ));
+DATA(insert OID = 467 ( int8eq PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 468 ( int8ne PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8ne - _null_ ));
+DATA(insert OID = 468 ( int8ne PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 469 ( int8lt PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8lt - _null_ ));
+DATA(insert OID = 469 ( int8lt PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 470 ( int8gt PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8gt - _null_ ));
+DATA(insert OID = 470 ( int8gt PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 471 ( int8le PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8le - _null_ ));
+DATA(insert OID = 471 ( int8le PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 472 ( int8ge PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8ge - _null_ ));
+DATA(insert OID = 472 ( int8ge PGNSP PGUID 12 f f t f i 2 16 "20 20" _null_ int8ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 474 ( int84eq PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84eq - _null_ ));
+DATA(insert OID = 474 ( int84eq PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 475 ( int84ne PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84ne - _null_ ));
+DATA(insert OID = 475 ( int84ne PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 476 ( int84lt PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84lt - _null_ ));
+DATA(insert OID = 476 ( int84lt PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 477 ( int84gt PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84gt - _null_ ));
+DATA(insert OID = 477 ( int84gt PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 478 ( int84le PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84le - _null_ ));
+DATA(insert OID = 478 ( int84le PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 479 ( int84ge PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84ge - _null_ ));
+DATA(insert OID = 479 ( int84ge PGNSP PGUID 12 f f t f i 2 16 "20 23" _null_ int84ge - _null_ ));
DESCR("greater-than-or-equal");
DATA(insert OID = 480 ( int4 PGNSP PGUID 12 f f t f i 1 23 "20" _null_ int84 - _null_ ));
DESCR("convert int4 to int8");
DATA(insert OID = 482 ( float8 PGNSP PGUID 12 f f t f i 1 701 "20" _null_ i8tod - _null_ ));
DESCR("convert int8 to float8");
-DATA(insert OID = 483 ( int8 PGNSP PGUID 12 f f t f i 1 20 "701" _null_ dtoi8 - _null_ ));
+DATA(insert OID = 483 ( int8 PGNSP PGUID 12 f f t f i 1 20 "701" _null_ dtoi8 - _null_ ));
DESCR("convert float8 to int8");
/* OIDS 500 - 599 */
DATA(insert OID = 652 ( float4 PGNSP PGUID 12 f f t f i 1 700 "20" _null_ i8tof - _null_ ));
DESCR("convert int8 to float4");
-DATA(insert OID = 653 ( int8 PGNSP PGUID 12 f f t f i 1 20 "700" _null_ ftoi8 - _null_ ));
+DATA(insert OID = 653 ( int8 PGNSP PGUID 12 f f t f i 1 20 "700" _null_ ftoi8 - _null_ ));
DESCR("convert float4 to int8");
DATA(insert OID = 714 ( int2 PGNSP PGUID 12 f f t f i 1 21 "20" _null_ int82 - _null_ ));
DATA(insert OID = 754 ( int8 PGNSP PGUID 12 f f t f i 1 20 "21" _null_ int28 - _null_ ));
DESCR("convert int2 to int8");
-DATA(insert OID = 1285 ( int4notin PGNSP PGUID 12 f f t f s 2 16 "23 25" _null_ int4notin - _null_ ));
+DATA(insert OID = 1285 ( int4notin PGNSP PGUID 12 f f t f s 2 16 "23 25" _null_ int4notin - _null_ ));
DESCR("not in");
-DATA(insert OID = 1286 ( oidnotin PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ oidnotin - _null_ ));
+DATA(insert OID = 1286 ( oidnotin PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ oidnotin - _null_ ));
DESCR("not in");
-DATA(insert OID = 655 ( namelt PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namelt - _null_ ));
+DATA(insert OID = 655 ( namelt PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namelt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 656 ( namele PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namele - _null_ ));
+DATA(insert OID = 656 ( namele PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namele - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 657 ( namegt PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namegt - _null_ ));
+DATA(insert OID = 657 ( namegt PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namegt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 658 ( namege PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namege - _null_ ));
+DATA(insert OID = 658 ( namege PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namege - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 659 ( namene PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namene - _null_ ));
+DATA(insert OID = 659 ( namene PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ namene - _null_ ));
DESCR("not equal");
DATA(insert OID = 668 ( bpchar PGNSP PGUID 12 f f t f i 3 1042 "1042 23 16" _null_ bpchar - _null_ ));
DATA(insert OID = 676 ( mktinterval PGNSP PGUID 12 f f t f i 2 704 "702 702" _null_ mktinterval - _null_ ));
DESCR("convert to tinterval");
-DATA(insert OID = 619 ( oidvectorne PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorne - _null_ ));
+DATA(insert OID = 619 ( oidvectorne PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 677 ( oidvectorlt PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorlt - _null_ ));
+DATA(insert OID = 677 ( oidvectorlt PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorlt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 678 ( oidvectorle PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorle - _null_ ));
+DATA(insert OID = 678 ( oidvectorle PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 679 ( oidvectoreq PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectoreq - _null_ ));
+DATA(insert OID = 679 ( oidvectoreq PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectoreq - _null_ ));
DESCR("equal");
-DATA(insert OID = 680 ( oidvectorge PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorge - _null_ ));
+DATA(insert OID = 680 ( oidvectorge PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 681 ( oidvectorgt PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorgt - _null_ ));
+DATA(insert OID = 681 ( oidvectorgt PGNSP PGUID 12 f f t f i 2 16 "30 30" _null_ oidvectorgt - _null_ ));
DESCR("greater-than");
/* OIDS 700 - 799 */
DATA(insert OID = 710 ( getpgusername PGNSP PGUID 12 f f t f s 0 19 "" _null_ current_user - _null_ ));
DESCR("deprecated -- use current_user");
-DATA(insert OID = 716 ( oidlt PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidlt - _null_ ));
+DATA(insert OID = 716 ( oidlt PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidlt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 717 ( oidle PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidle - _null_ ));
+DATA(insert OID = 717 ( oidle PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidle - _null_ ));
DESCR("less-than-or-equal");
DATA(insert OID = 720 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ byteaoctetlen - _null_ ));
DESCR("octet length");
-DATA(insert OID = 721 ( get_byte PGNSP PGUID 12 f f t f i 2 23 "17 23" _null_ byteaGetByte - _null_ ));
+DATA(insert OID = 721 ( get_byte PGNSP PGUID 12 f f t f i 2 23 "17 23" _null_ byteaGetByte - _null_ ));
DESCR("get byte");
-DATA(insert OID = 722 ( set_byte PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ byteaSetByte - _null_ ));
+DATA(insert OID = 722 ( set_byte PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ byteaSetByte - _null_ ));
DESCR("set byte");
-DATA(insert OID = 723 ( get_bit PGNSP PGUID 12 f f t f i 2 23 "17 23" _null_ byteaGetBit - _null_ ));
+DATA(insert OID = 723 ( get_bit PGNSP PGUID 12 f f t f i 2 23 "17 23" _null_ byteaGetBit - _null_ ));
DESCR("get bit");
-DATA(insert OID = 724 ( set_bit PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ byteaSetBit - _null_ ));
+DATA(insert OID = 724 ( set_bit PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ byteaSetBit - _null_ ));
DESCR("set bit");
-DATA(insert OID = 725 ( dist_pl PGNSP PGUID 12 f f t f i 2 701 "600 628" _null_ dist_pl - _null_ ));
+DATA(insert OID = 725 ( dist_pl PGNSP PGUID 12 f f t f i 2 701 "600 628" _null_ dist_pl - _null_ ));
DESCR("distance between point and line");
-DATA(insert OID = 726 ( dist_lb PGNSP PGUID 12 f f t f i 2 701 "628 603" _null_ dist_lb - _null_ ));
+DATA(insert OID = 726 ( dist_lb PGNSP PGUID 12 f f t f i 2 701 "628 603" _null_ dist_lb - _null_ ));
DESCR("distance between line and box");
-DATA(insert OID = 727 ( dist_sl PGNSP PGUID 12 f f t f i 2 701 "601 628" _null_ dist_sl - _null_ ));
+DATA(insert OID = 727 ( dist_sl PGNSP PGUID 12 f f t f i 2 701 "601 628" _null_ dist_sl - _null_ ));
DESCR("distance between lseg and line");
-DATA(insert OID = 728 ( dist_cpoly PGNSP PGUID 12 f f t f i 2 701 "718 604" _null_ dist_cpoly - _null_ ));
+DATA(insert OID = 728 ( dist_cpoly PGNSP PGUID 12 f f t f i 2 701 "718 604" _null_ dist_cpoly - _null_ ));
DESCR("distance between");
-DATA(insert OID = 729 ( poly_distance PGNSP PGUID 12 f f t f i 2 701 "604 604" _null_ poly_distance - _null_ ));
+DATA(insert OID = 729 ( poly_distance PGNSP PGUID 12 f f t f i 2 701 "604 604" _null_ poly_distance - _null_ ));
DESCR("distance between");
-DATA(insert OID = 740 ( text_lt PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_lt - _null_ ));
+DATA(insert OID = 740 ( text_lt PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 741 ( text_le PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_le - _null_ ));
+DATA(insert OID = 741 ( text_le PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 742 ( text_gt PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_gt - _null_ ));
+DATA(insert OID = 742 ( text_gt PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 743 ( text_ge PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_ge - _null_ ));
+DATA(insert OID = 743 ( text_ge PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ text_ge - _null_ ));
DESCR("greater-than-or-equal");
DATA(insert OID = 745 ( current_user PGNSP PGUID 12 f f t f s 0 19 "" _null_ current_user - _null_ ));
DESCR("array greater than or equal");
DATA(insert OID = 747 ( array_dims PGNSP PGUID 12 f f t f i 1 25 "2277" _null_ array_dims - _null_ ));
DESCR("array dimensions");
-DATA(insert OID = 750 ( array_in PGNSP PGUID 12 f f t f s 3 2277 "2275 26 23" _null_ array_in - _null_ ));
+DATA(insert OID = 750 ( array_in PGNSP PGUID 12 f f t f s 3 2277 "2275 26 23" _null_ array_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 751 ( array_out PGNSP PGUID 12 f f t f s 1 2275 "2277" _null_ array_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 764 ( lo_import PGNSP PGUID 12 f f t f v 1 26 "25" _null_ lo_import - _null_ ));
DESCR("large object import");
-DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 f f t f v 2 23 "26 25" _null_ lo_export - _null_ ));
+DATA(insert OID = 765 ( lo_export PGNSP PGUID 12 f f t f v 2 23 "26 25" _null_ lo_export - _null_ ));
DESCR("large object export");
DATA(insert OID = 766 ( int4inc PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4inc - _null_ ));
DESCR("increment");
-DATA(insert OID = 768 ( int4larger PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4larger - _null_ ));
+DATA(insert OID = 768 ( int4larger PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 769 ( int4smaller PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4smaller - _null_ ));
+DATA(insert OID = 769 ( int4smaller PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 770 ( int2larger PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2larger - _null_ ));
+DATA(insert OID = 770 ( int2larger PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 771 ( int2smaller PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2smaller - _null_ ));
+DATA(insert OID = 771 ( int2smaller PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2smaller - _null_ ));
DESCR("smaller of two");
DATA(insert OID = 774 ( gistgettuple PGNSP PGUID 12 f f t f v 2 16 "2281 2281" _null_ gistgettuple - _null_ ));
DESCR("gist(internal)");
DATA(insert OID = 775 ( gistinsert PGNSP PGUID 12 f f t f v 6 2281 "2281 2281 2281 2281 2281 2281" _null_ gistinsert - _null_ ));
DESCR("gist(internal)");
-DATA(insert OID = 777 ( gistbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ gistbeginscan - _null_ ));
+DATA(insert OID = 777 ( gistbeginscan PGNSP PGUID 12 f f t f v 3 2281 "2281 2281 2281" _null_ gistbeginscan - _null_ ));
DESCR("gist(internal)");
DATA(insert OID = 778 ( gistrescan PGNSP PGUID 12 f f t f v 2 2278 "2281 2281" _null_ gistrescan - _null_ ));
DESCR("gist(internal)");
DESCR("convert text to float8");
DATA(insert OID = 839 ( float4 PGNSP PGUID 12 f f t f i 1 700 "25" _null_ text_float4 - _null_ ));
DESCR("convert text to float4");
-DATA(insert OID = 840 ( text PGNSP PGUID 12 f f t f i 1 25 "701" _null_ float8_text - _null_ ));
+DATA(insert OID = 840 ( text PGNSP PGUID 12 f f t f i 1 25 "701" _null_ float8_text - _null_ ));
DESCR("convert float8 to text");
-DATA(insert OID = 841 ( text PGNSP PGUID 12 f f t f i 1 25 "700" _null_ float4_text - _null_ ));
+DATA(insert OID = 841 ( text PGNSP PGUID 12 f f t f i 1 25 "700" _null_ float4_text - _null_ ));
DESCR("convert float4 to text");
-DATA(insert OID = 846 ( cash_mul_flt4 PGNSP PGUID 12 f f t f i 2 790 "790 700" _null_ cash_mul_flt4 - _null_ ));
+DATA(insert OID = 846 ( cash_mul_flt4 PGNSP PGUID 12 f f t f i 2 790 "790 700" _null_ cash_mul_flt4 - _null_ ));
DESCR("multiply");
-DATA(insert OID = 847 ( cash_div_flt4 PGNSP PGUID 12 f f t f i 2 790 "790 700" _null_ cash_div_flt4 - _null_ ));
+DATA(insert OID = 847 ( cash_div_flt4 PGNSP PGUID 12 f f t f i 2 790 "790 700" _null_ cash_div_flt4 - _null_ ));
DESCR("divide");
-DATA(insert OID = 848 ( flt4_mul_cash PGNSP PGUID 12 f f t f i 2 790 "700 790" _null_ flt4_mul_cash - _null_ ));
+DATA(insert OID = 848 ( flt4_mul_cash PGNSP PGUID 12 f f t f i 2 790 "700 790" _null_ flt4_mul_cash - _null_ ));
DESCR("multiply");
DATA(insert OID = 849 ( position PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ textpos - _null_ ));
DATA(insert OID = 851 ( textnlike PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ textnlike - _null_ ));
DESCR("does not match LIKE expression");
-DATA(insert OID = 852 ( int48eq PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48eq - _null_ ));
+DATA(insert OID = 852 ( int48eq PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 853 ( int48ne PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48ne - _null_ ));
+DATA(insert OID = 853 ( int48ne PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 854 ( int48lt PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48lt - _null_ ));
+DATA(insert OID = 854 ( int48lt PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 855 ( int48gt PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48gt - _null_ ));
+DATA(insert OID = 855 ( int48gt PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 856 ( int48le PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48le - _null_ ));
+DATA(insert OID = 856 ( int48le PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 857 ( int48ge PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48ge - _null_ ));
+DATA(insert OID = 857 ( int48ge PGNSP PGUID 12 f f t f i 2 16 "23 20" _null_ int48ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 858 ( namelike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ namelike - _null_ ));
+DATA(insert OID = 858 ( namelike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ namelike - _null_ ));
DESCR("matches LIKE expression");
-DATA(insert OID = 859 ( namenlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ namenlike - _null_ ));
+DATA(insert OID = 859 ( namenlike PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ namenlike - _null_ ));
DESCR("does not match LIKE expression");
-DATA(insert OID = 860 ( bpchar PGNSP PGUID 12 f f t f i 1 1042 "18" _null_ char_bpchar - _null_ ));
+DATA(insert OID = 860 ( bpchar PGNSP PGUID 12 f f t f i 1 1042 "18" _null_ char_bpchar - _null_ ));
DESCR("convert char to char()");
DATA(insert OID = 861 ( current_database PGNSP PGUID 12 f f t f i 0 19 "" _null_ current_database - _null_ ));
DESCR("I/O");
DATA(insert OID = 887 ( cash_out PGNSP PGUID 12 f f t f i 1 2275 "790" _null_ cash_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_eq - _null_ ));
+DATA(insert OID = 888 ( cash_eq PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_ne - _null_ ));
+DATA(insert OID = 889 ( cash_ne PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 890 ( cash_lt PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_lt - _null_ ));
+DATA(insert OID = 890 ( cash_lt PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 891 ( cash_le PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_le - _null_ ));
+DATA(insert OID = 891 ( cash_le PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 892 ( cash_gt PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_gt - _null_ ));
+DATA(insert OID = 892 ( cash_gt PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 893 ( cash_ge PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_ge - _null_ ));
+DATA(insert OID = 893 ( cash_ge PGNSP PGUID 12 f f t f i 2 16 "790 790" _null_ cash_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 894 ( cash_pl PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cash_pl - _null_ ));
+DATA(insert OID = 894 ( cash_pl PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cash_pl - _null_ ));
DESCR("add");
-DATA(insert OID = 895 ( cash_mi PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cash_mi - _null_ ));
+DATA(insert OID = 895 ( cash_mi PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cash_mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 896 ( cash_mul_flt8 PGNSP PGUID 12 f f t f i 2 790 "790 701" _null_ cash_mul_flt8 - _null_ ));
+DATA(insert OID = 896 ( cash_mul_flt8 PGNSP PGUID 12 f f t f i 2 790 "790 701" _null_ cash_mul_flt8 - _null_ ));
DESCR("multiply");
-DATA(insert OID = 897 ( cash_div_flt8 PGNSP PGUID 12 f f t f i 2 790 "790 701" _null_ cash_div_flt8 - _null_ ));
+DATA(insert OID = 897 ( cash_div_flt8 PGNSP PGUID 12 f f t f i 2 790 "790 701" _null_ cash_div_flt8 - _null_ ));
DESCR("divide");
-DATA(insert OID = 898 ( cashlarger PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cashlarger - _null_ ));
+DATA(insert OID = 898 ( cashlarger PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cashlarger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 899 ( cashsmaller PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cashsmaller - _null_ ));
+DATA(insert OID = 899 ( cashsmaller PGNSP PGUID 12 f f t f i 2 790 "790 790" _null_ cashsmaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 919 ( flt8_mul_cash PGNSP PGUID 12 f f t f i 2 790 "701 790" _null_ flt8_mul_cash - _null_ ));
+DATA(insert OID = 919 ( flt8_mul_cash PGNSP PGUID 12 f f t f i 2 790 "701 790" _null_ flt8_mul_cash - _null_ ));
DESCR("multiply");
-DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 f f t f i 1 25 "790" _null_ cash_words - _null_ ));
+DATA(insert OID = 935 ( cash_words PGNSP PGUID 12 f f t f i 1 25 "790" _null_ cash_words - _null_ ));
DESCR("output amount as words");
/* OIDS 900 - 999 */
-DATA(insert OID = 940 ( mod PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mod - _null_ ));
+DATA(insert OID = 940 ( mod PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 941 ( mod PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mod - _null_ ));
+DATA(insert OID = 941 ( mod PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 942 ( mod PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mod - _null_ ));
+DATA(insert OID = 942 ( mod PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ int24mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 943 ( mod PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mod - _null_ ));
+DATA(insert OID = 943 ( mod PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ int42mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 945 ( int8mod PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mod - _null_ ));
+DATA(insert OID = 945 ( int8mod PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 947 ( mod PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mod - _null_ ));
+DATA(insert OID = 947 ( mod PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8mod - _null_ ));
DESCR("modulus");
DATA(insert OID = 944 ( char PGNSP PGUID 12 f f t f i 1 18 "25" _null_ text_char - _null_ ));
DATA(insert OID = 951 ( isfalse PGNSP PGUID 12 f f f f i 1 16 "16" _null_ isfalse - _null_ ));
DESCR("bool is false (not true or unknown)");
-DATA(insert OID = 952 ( lo_open PGNSP PGUID 12 f f t f v 2 23 "26 23" _null_ lo_open - _null_ ));
+DATA(insert OID = 952 ( lo_open PGNSP PGUID 12 f f t f v 2 23 "26 23" _null_ lo_open - _null_ ));
DESCR("large object open");
DATA(insert OID = 953 ( lo_close PGNSP PGUID 12 f f t f v 1 23 "23" _null_ lo_close - _null_ ));
DESCR("large object close");
-DATA(insert OID = 954 ( loread PGNSP PGUID 12 f f t f v 2 17 "23 23" _null_ loread - _null_ ));
+DATA(insert OID = 954 ( loread PGNSP PGUID 12 f f t f v 2 17 "23 23" _null_ loread - _null_ ));
DESCR("large object read");
-DATA(insert OID = 955 ( lowrite PGNSP PGUID 12 f f t f v 2 23 "23 17" _null_ lowrite - _null_ ));
+DATA(insert OID = 955 ( lowrite PGNSP PGUID 12 f f t f v 2 23 "23 17" _null_ lowrite - _null_ ));
DESCR("large object write");
-DATA(insert OID = 956 ( lo_lseek PGNSP PGUID 12 f f t f v 3 23 "23 23 23" _null_ lo_lseek - _null_ ));
+DATA(insert OID = 956 ( lo_lseek PGNSP PGUID 12 f f t f v 3 23 "23 23 23" _null_ lo_lseek - _null_ ));
DESCR("large object seek");
DATA(insert OID = 957 ( lo_creat PGNSP PGUID 12 f f t f v 1 26 "23" _null_ lo_creat - _null_ ));
DESCR("large object create");
DATA(insert OID = 958 ( lo_tell PGNSP PGUID 12 f f t f v 1 23 "23" _null_ lo_tell - _null_ ));
DESCR("large object position");
-DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 f f t f i 2 16 "600 628" _null_ on_pl - _null_ ));
+DATA(insert OID = 959 ( on_pl PGNSP PGUID 12 f f t f i 2 16 "600 628" _null_ on_pl - _null_ ));
DESCR("point on line?");
-DATA(insert OID = 960 ( on_sl PGNSP PGUID 12 f f t f i 2 16 "601 628" _null_ on_sl - _null_ ));
+DATA(insert OID = 960 ( on_sl PGNSP PGUID 12 f f t f i 2 16 "601 628" _null_ on_sl - _null_ ));
DESCR("lseg on line?");
-DATA(insert OID = 961 ( close_pl PGNSP PGUID 12 f f t f i 2 600 "600 628" _null_ close_pl - _null_ ));
+DATA(insert OID = 961 ( close_pl PGNSP PGUID 12 f f t f i 2 600 "600 628" _null_ close_pl - _null_ ));
DESCR("closest point on line");
-DATA(insert OID = 962 ( close_sl PGNSP PGUID 12 f f t f i 2 600 "601 628" _null_ close_sl - _null_ ));
+DATA(insert OID = 962 ( close_sl PGNSP PGUID 12 f f t f i 2 600 "601 628" _null_ close_sl - _null_ ));
DESCR("closest point to line segment on line");
-DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 f f t f i 2 600 "628 603" _null_ close_lb - _null_ ));
+DATA(insert OID = 963 ( close_lb PGNSP PGUID 12 f f t f i 2 600 "628 603" _null_ close_lb - _null_ ));
DESCR("closest point to line on box");
DATA(insert OID = 964 ( lo_unlink PGNSP PGUID 12 f f t f v 1 23 "26" _null_ lo_unlink - _null_ ));
DESCR("large object unlink(delete)");
-DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ path_inter - _null_ ));
+DATA(insert OID = 973 ( path_inter PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ path_inter - _null_ ));
DESCR("intersect?");
-DATA(insert OID = 975 ( area PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_area - _null_ ));
+DATA(insert OID = 975 ( area PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_area - _null_ ));
DESCR("box area");
-DATA(insert OID = 976 ( width PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_width - _null_ ));
+DATA(insert OID = 976 ( width PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_width - _null_ ));
DESCR("box width");
-DATA(insert OID = 977 ( height PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_height - _null_ ));
+DATA(insert OID = 977 ( height PGNSP PGUID 12 f f t f i 1 701 "603" _null_ box_height - _null_ ));
DESCR("box height");
-DATA(insert OID = 978 ( box_distance PGNSP PGUID 12 f f t f i 2 701 "603 603" _null_ box_distance - _null_ ));
+DATA(insert OID = 978 ( box_distance PGNSP PGUID 12 f f t f i 2 701 "603 603" _null_ box_distance - _null_ ));
DESCR("distance between boxes");
-DATA(insert OID = 979 ( area PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_area - _null_ ));
+DATA(insert OID = 979 ( area PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_area - _null_ ));
DESCR("area of a closed path");
-DATA(insert OID = 980 ( box_intersect PGNSP PGUID 12 f f t f i 2 603 "603 603" _null_ box_intersect - _null_ ));
+DATA(insert OID = 980 ( box_intersect PGNSP PGUID 12 f f t f i 2 603 "603 603" _null_ box_intersect - _null_ ));
DESCR("box intersection (another box)");
-DATA(insert OID = 981 ( diagonal PGNSP PGUID 12 f f t f i 1 601 "603" _null_ box_diagonal - _null_ ));
+DATA(insert OID = 981 ( diagonal PGNSP PGUID 12 f f t f i 1 601 "603" _null_ box_diagonal - _null_ ));
DESCR("box diagonal");
DATA(insert OID = 982 ( path_n_lt PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ path_n_lt - _null_ ));
DESCR("less-than");
DESCR("less-than-or-equal");
DATA(insert OID = 986 ( path_n_ge PGNSP PGUID 12 f f t f i 2 16 "602 602" _null_ path_n_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 987 ( path_length PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_length - _null_ ));
+DATA(insert OID = 987 ( path_length PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_length - _null_ ));
DESCR("sum of path segment lengths");
DATA(insert OID = 988 ( point_ne PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ point_ne - _null_ ));
DESCR("not equal");
DESCR("vertically aligned?");
DATA(insert OID = 990 ( point_horiz PGNSP PGUID 12 f f t f i 2 16 "600 600" _null_ point_horiz - _null_ ));
DESCR("horizontally aligned?");
-DATA(insert OID = 991 ( point_distance PGNSP PGUID 12 f f t f i 2 701 "600 600" _null_ point_distance - _null_ ));
+DATA(insert OID = 991 ( point_distance PGNSP PGUID 12 f f t f i 2 701 "600 600" _null_ point_distance - _null_ ));
DESCR("distance between");
-DATA(insert OID = 992 ( slope PGNSP PGUID 12 f f t f i 2 701 "600 600" _null_ point_slope - _null_ ));
+DATA(insert OID = 992 ( slope PGNSP PGUID 12 f f t f i 2 701 "600 600" _null_ point_slope - _null_ ));
DESCR("slope between points");
-DATA(insert OID = 993 ( lseg PGNSP PGUID 12 f f t f i 2 601 "600 600" _null_ lseg_construct - _null_ ));
+DATA(insert OID = 993 ( lseg PGNSP PGUID 12 f f t f i 2 601 "600 600" _null_ lseg_construct - _null_ ));
DESCR("convert points to line segment");
DATA(insert OID = 994 ( lseg_intersect PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ lseg_intersect - _null_ ));
DESCR("intersect?");
DESCR("add/update ACL item");
DATA(insert OID = 1036 ( aclremove PGNSP PGUID 12 f f t f i 2 1034 "1034 1033" _null_ aclremove - _null_ ));
DESCR("remove ACL item");
-DATA(insert OID = 1037 ( aclcontains PGNSP PGUID 12 f f t f i 2 16 "1034 1033" _null_ aclcontains - _null_ ));
+DATA(insert OID = 1037 ( aclcontains PGNSP PGUID 12 f f t f i 2 16 "1034 1033" _null_ aclcontains - _null_ ));
DESCR("does ACL contain item?");
-DATA(insert OID = 1062 ( aclitemeq PGNSP PGUID 12 f f t f i 2 16 "1033 1033" _null_ aclitem_eq - _null_ ));
+DATA(insert OID = 1062 ( aclitemeq PGNSP PGUID 12 f f t f i 2 16 "1033 1033" _null_ aclitem_eq - _null_ ));
DESCR("equality operator for ACL items");
DATA(insert OID = 1365 ( makeaclitem PGNSP PGUID 12 f f t f i 5 1033 "23 23 23 25 16" _null_ makeaclitem - _null_ ));
DESCR("make ACL item");
DESCR("I/O");
DATA(insert OID = 1047 ( varcharout PGNSP PGUID 12 f f t f i 1 2275 "1043" _null_ varcharout - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1048 ( bpchareq PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpchareq - _null_ ));
+DATA(insert OID = 1048 ( bpchareq PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpchareq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1049 ( bpcharlt PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharlt - _null_ ));
+DATA(insert OID = 1049 ( bpcharlt PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharlt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1050 ( bpcharle PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharle - _null_ ));
+DATA(insert OID = 1050 ( bpcharle PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharle - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1051 ( bpchargt PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpchargt - _null_ ));
+DATA(insert OID = 1051 ( bpchargt PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpchargt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1052 ( bpcharge PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharge - _null_ ));
+DATA(insert OID = 1052 ( bpcharge PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1053 ( bpcharne PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharne - _null_ ));
+DATA(insert OID = 1053 ( bpcharne PGNSP PGUID 12 f f t f i 2 16 "1042 1042" _null_ bpcharne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1078 ( bpcharcmp PGNSP PGUID 12 f f t f i 2 23 "1042 1042" _null_ bpcharcmp - _null_ ));
+DATA(insert OID = 1078 ( bpcharcmp PGNSP PGUID 12 f f t f i 2 23 "1042 1042" _null_ bpcharcmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1080 ( hashbpchar PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ hashbpchar - _null_ ));
+DATA(insert OID = 1080 ( hashbpchar PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ hashbpchar - _null_ ));
DESCR("hash");
DATA(insert OID = 1081 ( format_type PGNSP PGUID 12 f f f f s 2 25 "26 23" _null_ format_type - _null_ ));
DESCR("format a type oid and atttypmod to canonical SQL");
DESCR("I/O");
DATA(insert OID = 1085 ( date_out PGNSP PGUID 12 f f t f s 1 2275 "1082" _null_ date_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1086 ( date_eq PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_eq - _null_ ));
+DATA(insert OID = 1086 ( date_eq PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1087 ( date_lt PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_lt - _null_ ));
+DATA(insert OID = 1087 ( date_lt PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1088 ( date_le PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_le - _null_ ));
+DATA(insert OID = 1088 ( date_le PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1089 ( date_gt PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_gt - _null_ ));
+DATA(insert OID = 1089 ( date_gt PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1090 ( date_ge PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_ge - _null_ ));
+DATA(insert OID = 1090 ( date_ge PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1091 ( date_ne PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_ne - _null_ ));
+DATA(insert OID = 1091 ( date_ne PGNSP PGUID 12 f f t f i 2 16 "1082 1082" _null_ date_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1092 ( date_cmp PGNSP PGUID 12 f f t f i 2 23 "1082 1082" _null_ date_cmp - _null_ ));
+DATA(insert OID = 1092 ( date_cmp PGNSP PGUID 12 f f t f i 2 23 "1082 1082" _null_ date_cmp - _null_ ));
DESCR("less-equal-greater");
/* OIDS 1100 - 1199 */
-DATA(insert OID = 1102 ( time_lt PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_lt - _null_ ));
+DATA(insert OID = 1102 ( time_lt PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1103 ( time_le PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_le - _null_ ));
+DATA(insert OID = 1103 ( time_le PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1104 ( time_gt PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_gt - _null_ ));
+DATA(insert OID = 1104 ( time_gt PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1105 ( time_ge PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_ge - _null_ ));
+DATA(insert OID = 1105 ( time_ge PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1106 ( time_ne PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_ne - _null_ ));
+DATA(insert OID = 1106 ( time_ne PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1107 ( time_cmp PGNSP PGUID 12 f f t f i 2 23 "1083 1083" _null_ time_cmp - _null_ ));
+DATA(insert OID = 1107 ( time_cmp PGNSP PGUID 12 f f t f i 2 23 "1083 1083" _null_ time_cmp - _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 1138 ( date_larger PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ date_larger - _null_ ));
DESCR("larger of two");
DATA(insert OID = 1139 ( date_smaller PGNSP PGUID 12 f f t f i 2 1082 "1082 1082" _null_ date_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1140 ( date_mi PGNSP PGUID 12 f f t f i 2 23 "1082 1082" _null_ date_mi - _null_ ));
+DATA(insert OID = 1140 ( date_mi PGNSP PGUID 12 f f t f i 2 23 "1082 1082" _null_ date_mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1141 ( date_pli PGNSP PGUID 12 f f t f i 2 1082 "1082 23" _null_ date_pli - _null_ ));
+DATA(insert OID = 1141 ( date_pli PGNSP PGUID 12 f f t f i 2 1082 "1082 23" _null_ date_pli - _null_ ));
DESCR("add");
-DATA(insert OID = 1142 ( date_mii PGNSP PGUID 12 f f t f i 2 1082 "1082 23" _null_ date_mii - _null_ ));
+DATA(insert OID = 1142 ( date_mii PGNSP PGUID 12 f f t f i 2 1082 "1082 23" _null_ date_mii - _null_ ));
DESCR("subtract");
DATA(insert OID = 1143 ( time_in PGNSP PGUID 12 f f t f s 3 1083 "2275 26 23" _null_ time_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1144 ( time_out PGNSP PGUID 12 f f t f i 1 2275 "1083" _null_ time_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_eq - _null_ ));
+DATA(insert OID = 1145 ( time_eq PGNSP PGUID 12 f f t f i 2 16 "1083 1083" _null_ time_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1146 ( circle_add_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_add_pt - _null_ ));
+DATA(insert OID = 1146 ( circle_add_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_add_pt - _null_ ));
DESCR("add");
-DATA(insert OID = 1147 ( circle_sub_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_sub_pt - _null_ ));
+DATA(insert OID = 1147 ( circle_sub_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_sub_pt - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1148 ( circle_mul_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_mul_pt - _null_ ));
+DATA(insert OID = 1148 ( circle_mul_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_mul_pt - _null_ ));
DESCR("multiply");
-DATA(insert OID = 1149 ( circle_div_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_div_pt - _null_ ));
+DATA(insert OID = 1149 ( circle_div_pt PGNSP PGUID 12 f f t f i 2 718 "718 600" _null_ circle_div_pt - _null_ ));
DESCR("divide");
DATA(insert OID = 1150 ( timestamptz_in PGNSP PGUID 12 f f t f s 3 1184 "2275 26 23" _null_ timestamptz_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1151 ( timestamptz_out PGNSP PGUID 12 f f t f s 1 2275 "1184" _null_ timestamptz_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_eq - _null_ ));
+DATA(insert OID = 1152 ( timestamptz_eq PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1153 ( timestamptz_ne PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_ne - _null_ ));
+DATA(insert OID = 1153 ( timestamptz_ne PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1154 ( timestamptz_lt PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_lt - _null_ ));
+DATA(insert OID = 1154 ( timestamptz_lt PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1155 ( timestamptz_le PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_le - _null_ ));
+DATA(insert OID = 1155 ( timestamptz_le PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1156 ( timestamptz_ge PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_ge - _null_ ));
+DATA(insert OID = 1156 ( timestamptz_ge PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1157 ( timestamptz_gt PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_gt - _null_ ));
+DATA(insert OID = 1157 ( timestamptz_gt PGNSP PGUID 12 f f t f i 2 16 "1184 1184" _null_ timestamp_gt - _null_ ));
DESCR("greater-than");
DATA(insert OID = 1159 ( timezone PGNSP PGUID 12 f f t f i 2 1114 "25 1184" _null_ timestamptz_zone - _null_ ));
DESCR("adjust timestamp to new time zone");
DESCR("I/O");
DATA(insert OID = 1161 ( interval_out PGNSP PGUID 12 f f t f i 1 2275 "1186" _null_ interval_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_eq - _null_ ));
+DATA(insert OID = 1162 ( interval_eq PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1163 ( interval_ne PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_ne - _null_ ));
+DATA(insert OID = 1163 ( interval_ne PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1164 ( interval_lt PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_lt - _null_ ));
+DATA(insert OID = 1164 ( interval_lt PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1165 ( interval_le PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_le - _null_ ));
+DATA(insert OID = 1165 ( interval_le PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1166 ( interval_ge PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_ge - _null_ ));
+DATA(insert OID = 1166 ( interval_ge PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1167 ( interval_gt PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_gt - _null_ ));
+DATA(insert OID = 1167 ( interval_gt PGNSP PGUID 12 f f t f i 2 16 "1186 1186" _null_ interval_gt - _null_ ));
DESCR("greater-than");
DATA(insert OID = 1168 ( interval_um PGNSP PGUID 12 f f t f i 1 1186 "1186" _null_ interval_um - _null_ ));
DESCR("subtract");
DESCR("add");
DATA(insert OID = 1170 ( interval_mi PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ interval_mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1171 ( date_part PGNSP PGUID 12 f f t f s 2 701 "25 1184" _null_ timestamptz_part - _null_ ));
+DATA(insert OID = 1171 ( date_part PGNSP PGUID 12 f f t f s 2 701 "25 1184" _null_ timestamptz_part - _null_ ));
DESCR("extract field from timestamp with time zone");
-DATA(insert OID = 1172 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1186" _null_ interval_part - _null_ ));
+DATA(insert OID = 1172 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1186" _null_ interval_part - _null_ ));
DESCR("extract field from interval");
-DATA(insert OID = 1173 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "702" _null_ abstime_timestamptz - _null_ ));
+DATA(insert OID = 1173 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "702" _null_ abstime_timestamptz - _null_ ));
DESCR("convert abstime to timestamp with time zone");
DATA(insert OID = 1174 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "1082" _null_ date_timestamptz - _null_ ));
DESCR("convert date to timestamp with time zone");
DATA(insert OID = 1176 ( timestamptz PGNSP PGUID 14 f f t f s 2 1184 "1082 1083" _null_ "select cast(($1 + $2) as timestamp with time zone)" - _null_ ));
DESCR("convert date and time to timestamp with time zone");
-DATA(insert OID = 1177 ( interval PGNSP PGUID 12 f f t f i 1 1186 "703" _null_ reltime_interval - _null_ ));
+DATA(insert OID = 1177 ( interval PGNSP PGUID 12 f f t f i 1 1186 "703" _null_ reltime_interval - _null_ ));
DESCR("convert reltime to interval");
DATA(insert OID = 1178 ( date PGNSP PGUID 12 f f t f s 1 1082 "1184" _null_ timestamptz_date - _null_ ));
DESCR("convert timestamp with time zone to date");
-DATA(insert OID = 1179 ( date PGNSP PGUID 12 f f t f s 1 1082 "702" _null_ abstime_date - _null_ ));
+DATA(insert OID = 1179 ( date PGNSP PGUID 12 f f t f s 1 1082 "702" _null_ abstime_date - _null_ ));
DESCR("convert abstime to date");
DATA(insert OID = 1180 ( abstime PGNSP PGUID 12 f f t f s 1 702 "1184" _null_ timestamptz_abstime - _null_ ));
DESCR("convert timestamp with time zone to abstime");
DESCR("plus");
DATA(insert OID = 1190 ( timestamptz_mi_interval PGNSP PGUID 12 f f t f i 2 1184 "1184 1186" _null_ timestamptz_mi_interval - _null_ ));
DESCR("minus");
-DATA(insert OID = 1191 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "25" _null_ text_timestamptz - _null_ ));
+DATA(insert OID = 1191 ( timestamptz PGNSP PGUID 12 f f t f s 1 1184 "25" _null_ text_timestamptz - _null_ ));
DESCR("convert text to timestamp with time zone");
DATA(insert OID = 1192 ( text PGNSP PGUID 12 f f t f s 1 25 "1184" _null_ timestamptz_text - _null_ ));
DESCR("convert timestamp with time zone to text");
DESCR("smaller of two");
DATA(insert OID = 1196 ( timestamptz_larger PGNSP PGUID 12 f f t f i 2 1184 "1184 1184" _null_ timestamp_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1197 ( interval_smaller PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ interval_smaller - _null_ ));
+DATA(insert OID = 1197 ( interval_smaller PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ interval_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1198 ( interval_larger PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ interval_larger - _null_ ));
+DATA(insert OID = 1198 ( interval_larger PGNSP PGUID 12 f f t f i 2 1186 "1186 1186" _null_ interval_larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1199 ( age PGNSP PGUID 12 f f t f i 2 1186 "1184 1184" _null_ timestamptz_age - _null_ ));
+DATA(insert OID = 1199 ( age PGNSP PGUID 12 f f t f i 2 1186 "1184 1184" _null_ timestamptz_age - _null_ ));
DESCR("date difference preserving months and years");
/* OIDS 1200 - 1299 */
DATA(insert OID = 1216 ( col_description PGNSP PGUID 14 f f t f s 2 25 "26 23" _null_ "select description from pg_catalog.pg_description where objoid = $1 and classoid = \'pg_catalog.pg_class\'::regclass and objsubid = $2" - _null_ ));
DESCR("get description for table column");
-DATA(insert OID = 1217 ( date_trunc PGNSP PGUID 12 f f t f i 2 1184 "25 1184" _null_ timestamptz_trunc - _null_ ));
+DATA(insert OID = 1217 ( date_trunc PGNSP PGUID 12 f f t f i 2 1184 "25 1184" _null_ timestamptz_trunc - _null_ ));
DESCR("truncate timestamp with time zone to specified units");
-DATA(insert OID = 1218 ( date_trunc PGNSP PGUID 12 f f t f i 2 1186 "25 1186" _null_ interval_trunc - _null_ ));
+DATA(insert OID = 1218 ( date_trunc PGNSP PGUID 12 f f t f i 2 1186 "25 1186" _null_ interval_trunc - _null_ ));
DESCR("truncate interval to specified units");
DATA(insert OID = 1219 ( int8inc PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8inc - _null_ ));
DATA(insert OID = 1230 ( int8abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1236 ( int8larger PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8larger - _null_ ));
+DATA(insert OID = 1236 ( int8larger PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8larger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1237 ( int8smaller PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8smaller - _null_ ));
+DATA(insert OID = 1237 ( int8smaller PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1238 ( texticregexeq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texticregexeq - _null_ ));
+DATA(insert OID = 1238 ( texticregexeq PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texticregexeq - _null_ ));
DESCR("matches regex., case-insensitive");
-DATA(insert OID = 1239 ( texticregexne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texticregexne - _null_ ));
+DATA(insert OID = 1239 ( texticregexne PGNSP PGUID 12 f f t f i 2 16 "25 25" _null_ texticregexne - _null_ ));
DESCR("does not match regex., case-insensitive");
-DATA(insert OID = 1240 ( nameicregexeq PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameicregexeq - _null_ ));
+DATA(insert OID = 1240 ( nameicregexeq PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameicregexeq - _null_ ));
DESCR("matches regex., case-insensitive");
-DATA(insert OID = 1241 ( nameicregexne PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameicregexne - _null_ ));
+DATA(insert OID = 1241 ( nameicregexne PGNSP PGUID 12 f f t f i 2 16 "19 25" _null_ nameicregexne - _null_ ));
DESCR("does not match regex., case-insensitive");
DATA(insert OID = 1251 ( int4abs PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4abs - _null_ ));
DATA(insert OID = 1253 ( int2abs PGNSP PGUID 12 f f t f i 1 21 "21" _null_ int2abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1263 ( interval PGNSP PGUID 12 f f t f s 1 1186 "25" _null_ text_interval - _null_ ));
+DATA(insert OID = 1263 ( interval PGNSP PGUID 12 f f t f s 1 1186 "25" _null_ text_interval - _null_ ));
DESCR("convert text to interval");
DATA(insert OID = 1271 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1266 1266 1266 1266" _null_ overlaps_timetz - _null_ ));
DESCR("SQL92 interval comparison");
DATA(insert OID = 1272 ( datetime_pl PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ datetime_timestamp - _null_ ));
DESCR("convert date and time to timestamp");
-DATA(insert OID = 1273 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1266" _null_ timetz_part - _null_ ));
+DATA(insert OID = 1273 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1266" _null_ timetz_part - _null_ ));
DESCR("extract field from time with time zone");
-DATA(insert OID = 1274 ( int84pl PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84pl - _null_ ));
+DATA(insert OID = 1274 ( int84pl PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84pl - _null_ ));
DESCR("add");
-DATA(insert OID = 1275 ( int84mi PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84mi - _null_ ));
+DATA(insert OID = 1275 ( int84mi PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1276 ( int84mul PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84mul - _null_ ));
+DATA(insert OID = 1276 ( int84mul PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 1277 ( int84div PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84div - _null_ ));
+DATA(insert OID = 1277 ( int84div PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int84div - _null_ ));
DESCR("divide");
-DATA(insert OID = 1278 ( int48pl PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48pl - _null_ ));
+DATA(insert OID = 1278 ( int48pl PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48pl - _null_ ));
DESCR("add");
-DATA(insert OID = 1279 ( int48mi PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48mi - _null_ ));
+DATA(insert OID = 1279 ( int48mi PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1280 ( int48mul PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48mul - _null_ ));
+DATA(insert OID = 1280 ( int48mul PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 1281 ( int48div PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48div - _null_ ));
+DATA(insert OID = 1281 ( int48div PGNSP PGUID 12 f f t f i 2 20 "23 20" _null_ int48div - _null_ ));
DESCR("divide");
DATA(insert OID = 1287 ( oid PGNSP PGUID 12 f f t f i 1 26 "20" _null_ i8tooid - _null_ ));
DATA(insert OID = 1290 ( int8 PGNSP PGUID 12 f f t f i 1 20 "25" _null_ text_int8 - _null_ ));
DESCR("convert text to int8");
-DATA(insert OID = 1291 ( array_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ array_length_coerce - _null_ ));
+DATA(insert OID = 1291 ( array_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ array_length_coerce - _null_ ));
DESCR("adjust any array to new element typmod");
-DATA(insert OID = 1292 ( tideq PGNSP PGUID 12 f f t f i 2 16 "27 27" _null_ tideq - _null_ ));
+DATA(insert OID = 1292 ( tideq PGNSP PGUID 12 f f t f i 2 16 "27 27" _null_ tideq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1293 ( currtid PGNSP PGUID 12 f f t f v 2 27 "26 27" _null_ currtid_byreloid - _null_ ));
+DATA(insert OID = 1293 ( currtid PGNSP PGUID 12 f f t f v 2 27 "26 27" _null_ currtid_byreloid - _null_ ));
DESCR("latest tid of a tuple");
-DATA(insert OID = 1294 ( currtid2 PGNSP PGUID 12 f f t f v 2 27 "25 27" _null_ currtid_byrelname - _null_ ));
+DATA(insert OID = 1294 ( currtid2 PGNSP PGUID 12 f f t f v 2 27 "25 27" _null_ currtid_byrelname - _null_ ));
DESCR("latest tid of a tuple");
DATA(insert OID = 1296 ( timedate_pl PGNSP PGUID 14 f f t f i 2 1114 "1083 1082" _null_ "select ($2 + $1)" - _null_ ));
/* OIDS 1300 - 1399 */
-DATA(insert OID = 1300 ( positionsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ positionsel - _null_ ));
+DATA(insert OID = 1300 ( positionsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ positionsel - _null_ ));
DESCR("restriction selectivity for position-comparison operators");
-DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ positionjoinsel - _null_ ));
+DATA(insert OID = 1301 ( positionjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ positionjoinsel - _null_ ));
DESCR("join selectivity for position-comparison operators");
-DATA(insert OID = 1302 ( contsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ contsel - _null_ ));
+DATA(insert OID = 1302 ( contsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 23" _null_ contsel - _null_ ));
DESCR("restriction selectivity for containment comparison operators");
-DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ contjoinsel - _null_ ));
+DATA(insert OID = 1303 ( contjoinsel PGNSP PGUID 12 f f t f s 4 701 "2281 26 2281 21" _null_ contjoinsel - _null_ ));
DESCR("join selectivity for containment comparison operators");
-DATA(insert OID = 1304 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1184 1184 1184 1184" _null_ overlaps_timestamp - _null_ ));
+DATA(insert OID = 1304 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1184 1184 1184 1184" _null_ overlaps_timestamp - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1305 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1186 1184 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 1305 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1186 1184 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1306 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1184 1184 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 1306 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1184 1184 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1307 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1186 1184 1184" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
+DATA(insert OID = 1307 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1184 1186 1184 1184" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1308 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1083 1083 1083 1083" _null_ overlaps_time - _null_ ));
+DATA(insert OID = 1308 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1083 1083 1083 1083" _null_ overlaps_time - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1309 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1186 1083 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 1309 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1186 1083 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1310 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1083 1083 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 1310 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1083 1083 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 1311 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1186 1083 1083" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
+DATA(insert OID = 1311 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1083 1186 1083 1083" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
DESCR("SQL92 interval comparison");
DATA(insert OID = 1312 ( timestamp_in PGNSP PGUID 12 f f t f s 3 1114 "2275 26 23" _null_ timestamp_in - _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 1315 ( interval_cmp PGNSP PGUID 12 f f t f i 2 23 "1186 1186" _null_ interval_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1316 ( time PGNSP PGUID 12 f f t f i 1 1083 "1114" _null_ timestamp_time - _null_ ));
+DATA(insert OID = 1316 ( time PGNSP PGUID 12 f f t f i 1 1083 "1114" _null_ timestamp_time - _null_ ));
DESCR("convert timestamp to time");
-DATA(insert OID = 1317 ( length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
+DATA(insert OID = 1317 ( length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
DESCR("length");
DATA(insert OID = 1318 ( length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharlen - _null_ ));
DESCR("character length");
DATA(insert OID = 1319 ( xideqint4 PGNSP PGUID 12 f f t f i 2 16 "28 23" _null_ xideq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1326 ( interval_div PGNSP PGUID 12 f f t f i 2 1186 "1186 701" _null_ interval_div - _null_ ));
+DATA(insert OID = 1326 ( interval_div PGNSP PGUID 12 f f t f i 2 1186 "1186 701" _null_ interval_div - _null_ ));
DESCR("divide");
DATA(insert OID = 1339 ( dlog10 PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dlog10 - _null_ ));
* This form of obj_description is now deprecated, since it will fail if
* OIDs are not unique across system catalogs. Use the other forms instead.
*/
-DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 f f t f s 1 25 "26" _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" - _null_ ));
+DATA(insert OID = 1348 ( obj_description PGNSP PGUID 14 f f t f s 1 25 "26" _null_ "select description from pg_catalog.pg_description where objoid = $1 and objsubid = 0" - _null_ ));
DESCR("get description for object id (deprecated)");
-DATA(insert OID = 1349 ( oidvectortypes PGNSP PGUID 12 f f t f s 1 25 "30" _null_ oidvectortypes - _null_ ));
+DATA(insert OID = 1349 ( oidvectortypes PGNSP PGUID 12 f f t f s 1 25 "30" _null_ oidvectortypes - _null_ ));
DESCR("print type names of oidvector field");
DESCR("I/O");
DATA(insert OID = 1351 ( timetz_out PGNSP PGUID 12 f f t f i 1 2275 "1266" _null_ timetz_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_eq - _null_ ));
+DATA(insert OID = 1352 ( timetz_eq PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1353 ( timetz_ne PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_ne - _null_ ));
+DATA(insert OID = 1353 ( timetz_ne PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1354 ( timetz_lt PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_lt - _null_ ));
+DATA(insert OID = 1354 ( timetz_lt PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1355 ( timetz_le PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_le - _null_ ));
+DATA(insert OID = 1355 ( timetz_le PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1356 ( timetz_ge PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_ge - _null_ ));
+DATA(insert OID = 1356 ( timetz_ge PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1357 ( timetz_gt PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_gt - _null_ ));
+DATA(insert OID = 1357 ( timetz_gt PGNSP PGUID 12 f f t f i 2 16 "1266 1266" _null_ timetz_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1358 ( timetz_cmp PGNSP PGUID 12 f f t f i 2 23 "1266 1266" _null_ timetz_cmp - _null_ ));
+DATA(insert OID = 1358 ( timetz_cmp PGNSP PGUID 12 f f t f i 2 23 "1266 1266" _null_ timetz_cmp - _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 1359 ( timestamptz PGNSP PGUID 12 f f t f i 2 1184 "1082 1266" _null_ datetimetz_timestamptz - _null_ ));
DESCR("convert date and time with time zone to timestamp with time zone");
DATA(insert OID = 1367 ( character_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharlen - _null_ ));
DESCR("character length");
-DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
+DATA(insert OID = 1369 ( character_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
DESCR("character length");
-DATA(insert OID = 1370 ( interval PGNSP PGUID 12 f f t f i 1 1186 "1083" _null_ time_interval - _null_ ));
+DATA(insert OID = 1370 ( interval PGNSP PGUID 12 f f t f i 1 1186 "1083" _null_ time_interval - _null_ ));
DESCR("convert time to interval");
-DATA(insert OID = 1372 ( char_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharlen - _null_ ));
+DATA(insert OID = 1372 ( char_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharlen - _null_ ));
DESCR("character length");
-DATA(insert OID = 1373 ( array_type_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ array_type_length_coerce - _null_ ));
+DATA(insert OID = 1373 ( array_type_length_coerce PGNSP PGUID 12 f f t f s 3 2277 "2277 23 16" _null_ array_type_length_coerce - _null_ ));
DESCR("coerce array to another type and adjust element typmod");
DATA(insert OID = 1374 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textoctetlen - _null_ ));
DESCR("octet length");
-DATA(insert OID = 1375 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharoctetlen - _null_ ));
+DATA(insert OID = 1375 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "1042" _null_ bpcharoctetlen - _null_ ));
DESCR("octet length");
DATA(insert OID = 1377 ( time_larger PGNSP PGUID 12 f f t f i 2 1083 "1083 1083" _null_ time_larger - _null_ ));
DATA(insert OID = 1381 ( char_length PGNSP PGUID 12 f f t f i 1 23 "25" _null_ textlen - _null_ ));
DESCR("character length");
-DATA(insert OID = 1382 ( date_part PGNSP PGUID 14 f f t f s 2 701 "25 702" _null_ "select pg_catalog.date_part($1, cast($2 as timestamp with time zone))" - _null_ ));
+DATA(insert OID = 1382 ( date_part PGNSP PGUID 14 f f t f s 2 701 "25 702" _null_ "select pg_catalog.date_part($1, cast($2 as timestamp with time zone))" - _null_ ));
DESCR("extract field from abstime");
-DATA(insert OID = 1383 ( date_part PGNSP PGUID 14 f f t f s 2 701 "25 703" _null_ "select pg_catalog.date_part($1, cast($2 as pg_catalog.interval))" - _null_ ));
+DATA(insert OID = 1383 ( date_part PGNSP PGUID 14 f f t f s 2 701 "25 703" _null_ "select pg_catalog.date_part($1, cast($2 as pg_catalog.interval))" - _null_ ));
DESCR("extract field from reltime");
-DATA(insert OID = 1384 ( date_part PGNSP PGUID 14 f f t f i 2 701 "25 1082" _null_ "select pg_catalog.date_part($1, cast($2 as timestamp without time zone))" - _null_ ));
+DATA(insert OID = 1384 ( date_part PGNSP PGUID 14 f f t f i 2 701 "25 1082" _null_ "select pg_catalog.date_part($1, cast($2 as timestamp without time zone))" - _null_ ));
DESCR("extract field from date");
DATA(insert OID = 1385 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1083" _null_ time_part - _null_ ));
DESCR("extract field from time");
DATA(insert OID = 1388 ( timetz PGNSP PGUID 12 f f t f s 1 1266 "1184" _null_ timestamptz_timetz - _null_ ));
DESCR("convert timestamptz to timetz");
-DATA(insert OID = 1389 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1184" _null_ timestamp_finite - _null_ ));
+DATA(insert OID = 1389 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1184" _null_ timestamp_finite - _null_ ));
DESCR("finite timestamp?");
-DATA(insert OID = 1390 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1186" _null_ interval_finite - _null_ ));
+DATA(insert OID = 1390 ( isfinite PGNSP PGUID 12 f f t f i 1 16 "1186" _null_ interval_finite - _null_ ));
DESCR("finite interval?");
-DATA(insert OID = 1376 ( factorial PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ numeric_fac - _null_ ));
+DATA(insert OID = 1376 ( factorial PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ numeric_fac - _null_ ));
DESCR("factorial");
-DATA(insert OID = 1394 ( abs PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4abs - _null_ ));
+DATA(insert OID = 1394 ( abs PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4abs - _null_ ));
DESCR("absolute value");
-DATA(insert OID = 1395 ( abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8abs - _null_ ));
+DATA(insert OID = 1395 ( abs PGNSP PGUID 12 f f t f i 1 701 "701" _null_ float8abs - _null_ ));
DESCR("absolute value");
DATA(insert OID = 1396 ( abs PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8abs - _null_ ));
DESCR("absolute value");
/* OIDS 1400 - 1499 */
-DATA(insert OID = 1400 ( name PGNSP PGUID 12 f f t f i 1 19 "1043" _null_ text_name - _null_ ));
+DATA(insert OID = 1400 ( name PGNSP PGUID 12 f f t f i 1 19 "1043" _null_ text_name - _null_ ));
DESCR("convert varchar to name");
-DATA(insert OID = 1401 ( varchar PGNSP PGUID 12 f f t f i 1 1043 "19" _null_ name_text - _null_ ));
+DATA(insert OID = 1401 ( varchar PGNSP PGUID 12 f f t f i 1 1043 "19" _null_ name_text - _null_ ));
DESCR("convert name to varchar");
DATA(insert OID = 1402 ( current_schema PGNSP PGUID 12 f f t f s 0 19 "" _null_ current_schema - _null_ ));
DATA(insert OID = 1403 ( current_schemas PGNSP PGUID 12 f f t f s 1 1003 "16" _null_ current_schemas - _null_ ));
DESCR("current schema search list");
-DATA(insert OID = 1404 ( overlay PGNSP PGUID 14 f f t f i 4 25 "25 25 23 23" _null_ "select pg_catalog.substring($1, 1, ($3 - 1)) || $2 || pg_catalog.substring($1, ($3 + $4))" - _null_ ));
+DATA(insert OID = 1404 ( overlay PGNSP PGUID 14 f f t f i 4 25 "25 25 23 23" _null_ "select pg_catalog.substring($1, 1, ($3 - 1)) || $2 || pg_catalog.substring($1, ($3 + $4))" - _null_ ));
DESCR("substitute portion of string");
DATA(insert OID = 1405 ( overlay PGNSP PGUID 14 f f t f i 3 25 "25 25 23" _null_ "select pg_catalog.substring($1, 1, ($3 - 1)) || $2 || pg_catalog.substring($1, ($3 + pg_catalog.char_length($2)))" - _null_ ));
DESCR("substitute portion of string");
DESCR("vertical?");
DATA(insert OID = 1415 ( ishorizontal PGNSP PGUID 12 f f t f i 1 16 "628" _null_ line_horizontal - _null_ ));
DESCR("horizontal?");
-DATA(insert OID = 1416 ( point PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
+DATA(insert OID = 1416 ( point PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
DESCR("center of");
DATA(insert OID = 1417 ( isnottrue PGNSP PGUID 12 f f f f i 1 16 "16" _null_ isnottrue - _null_ ));
DATA(insert OID = 1419 ( time PGNSP PGUID 12 f f t f i 1 1083 "1186" _null_ interval_time - _null_ ));
DESCR("convert interval to time");
-DATA(insert OID = 1421 ( box PGNSP PGUID 12 f f t f i 2 603 "600 600" _null_ points_box - _null_ ));
+DATA(insert OID = 1421 ( box PGNSP PGUID 12 f f t f i 2 603 "600 600" _null_ points_box - _null_ ));
DESCR("convert points to box");
-DATA(insert OID = 1422 ( box_add PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_add - _null_ ));
+DATA(insert OID = 1422 ( box_add PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_add - _null_ ));
DESCR("add point to box (translate)");
-DATA(insert OID = 1423 ( box_sub PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_sub - _null_ ));
+DATA(insert OID = 1423 ( box_sub PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_sub - _null_ ));
DESCR("subtract point from box (translate)");
-DATA(insert OID = 1424 ( box_mul PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_mul - _null_ ));
+DATA(insert OID = 1424 ( box_mul PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_mul - _null_ ));
DESCR("multiply box by point (scale)");
-DATA(insert OID = 1425 ( box_div PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_div - _null_ ));
+DATA(insert OID = 1425 ( box_div PGNSP PGUID 12 f f t f i 2 603 "603 600" _null_ box_div - _null_ ));
DESCR("divide box by point (scale)");
DATA(insert OID = 1426 ( path_contain_pt PGNSP PGUID 14 f f t f i 2 16 "602 600" _null_ "select pg_catalog.on_ppath($2, $1)" - _null_ ));
DESCR("path contains point?");
* - thomas 97/04/20
*/
-DATA(insert OID = 1433 ( pclose PGNSP PGUID 12 f f t f i 1 602 "602" _null_ path_close - _null_ ));
+DATA(insert OID = 1433 ( pclose PGNSP PGUID 12 f f t f i 1 602 "602" _null_ path_close - _null_ ));
DESCR("close path");
-DATA(insert OID = 1434 ( popen PGNSP PGUID 12 f f t f i 1 602 "602" _null_ path_open - _null_ ));
+DATA(insert OID = 1434 ( popen PGNSP PGUID 12 f f t f i 1 602 "602" _null_ path_open - _null_ ));
DESCR("open path");
-DATA(insert OID = 1435 ( path_add PGNSP PGUID 12 f f t f i 2 602 "602 602" _null_ path_add - _null_ ));
+DATA(insert OID = 1435 ( path_add PGNSP PGUID 12 f f t f i 2 602 "602 602" _null_ path_add - _null_ ));
DESCR("concatenate open paths");
-DATA(insert OID = 1436 ( path_add_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_add_pt - _null_ ));
+DATA(insert OID = 1436 ( path_add_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_add_pt - _null_ ));
DESCR("add (translate path)");
-DATA(insert OID = 1437 ( path_sub_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_sub_pt - _null_ ));
+DATA(insert OID = 1437 ( path_sub_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_sub_pt - _null_ ));
DESCR("subtract (translate path)");
-DATA(insert OID = 1438 ( path_mul_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_mul_pt - _null_ ));
+DATA(insert OID = 1438 ( path_mul_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_mul_pt - _null_ ));
DESCR("multiply (rotate/scale path)");
-DATA(insert OID = 1439 ( path_div_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_div_pt - _null_ ));
+DATA(insert OID = 1439 ( path_div_pt PGNSP PGUID 12 f f t f i 2 602 "602 600" _null_ path_div_pt - _null_ ));
DESCR("divide (rotate/scale path)");
-DATA(insert OID = 1440 ( point PGNSP PGUID 12 f f t f i 2 600 "701 701" _null_ construct_point - _null_ ));
+DATA(insert OID = 1440 ( point PGNSP PGUID 12 f f t f i 2 600 "701 701" _null_ construct_point - _null_ ));
DESCR("convert x, y to point");
-DATA(insert OID = 1441 ( point_add PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_add - _null_ ));
+DATA(insert OID = 1441 ( point_add PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_add - _null_ ));
DESCR("add points (translate)");
-DATA(insert OID = 1442 ( point_sub PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_sub - _null_ ));
+DATA(insert OID = 1442 ( point_sub PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_sub - _null_ ));
DESCR("subtract points (translate)");
-DATA(insert OID = 1443 ( point_mul PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_mul - _null_ ));
+DATA(insert OID = 1443 ( point_mul PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_mul - _null_ ));
DESCR("multiply points (scale/rotate)");
-DATA(insert OID = 1444 ( point_div PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_div - _null_ ));
+DATA(insert OID = 1444 ( point_div PGNSP PGUID 12 f f t f i 2 600 "600 600" _null_ point_div - _null_ ));
DESCR("divide points (scale/rotate)");
DATA(insert OID = 1445 ( poly_npoints PGNSP PGUID 12 f f t f i 1 23 "604" _null_ poly_npoints - _null_ ));
DESCR("number of points in polygon");
-DATA(insert OID = 1446 ( box PGNSP PGUID 12 f f t f i 1 603 "604" _null_ poly_box - _null_ ));
+DATA(insert OID = 1446 ( box PGNSP PGUID 12 f f t f i 1 603 "604" _null_ poly_box - _null_ ));
DESCR("convert polygon to bounding box");
-DATA(insert OID = 1447 ( path PGNSP PGUID 12 f f t f i 1 602 "604" _null_ poly_path - _null_ ));
+DATA(insert OID = 1447 ( path PGNSP PGUID 12 f f t f i 1 602 "604" _null_ poly_path - _null_ ));
DESCR("convert polygon to path");
-DATA(insert OID = 1448 ( polygon PGNSP PGUID 12 f f t f i 1 604 "603" _null_ box_poly - _null_ ));
+DATA(insert OID = 1448 ( polygon PGNSP PGUID 12 f f t f i 1 604 "603" _null_ box_poly - _null_ ));
DESCR("convert box to polygon");
-DATA(insert OID = 1449 ( polygon PGNSP PGUID 12 f f t f i 1 604 "602" _null_ path_poly - _null_ ));
+DATA(insert OID = 1449 ( polygon PGNSP PGUID 12 f f t f i 1 604 "602" _null_ path_poly - _null_ ));
DESCR("convert path to polygon");
DATA(insert OID = 1450 ( circle_in PGNSP PGUID 12 f f t f i 1 718 "2275" _null_ circle_in - _null_ ));
DESCR("less-than-or-equal by area");
DATA(insert OID = 1467 ( circle_ge PGNSP PGUID 12 f f t f i 2 16 "718 718" _null_ circle_ge - _null_ ));
DESCR("greater-than-or-equal by area");
-DATA(insert OID = 1468 ( area PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_area - _null_ ));
+DATA(insert OID = 1468 ( area PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_area - _null_ ));
DESCR("area of circle");
-DATA(insert OID = 1469 ( diameter PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_diameter - _null_ ));
+DATA(insert OID = 1469 ( diameter PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_diameter - _null_ ));
DESCR("diameter of circle");
-DATA(insert OID = 1470 ( radius PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_radius - _null_ ));
+DATA(insert OID = 1470 ( radius PGNSP PGUID 12 f f t f i 1 701 "718" _null_ circle_radius - _null_ ));
DESCR("radius of circle");
-DATA(insert OID = 1471 ( circle_distance PGNSP PGUID 12 f f t f i 2 701 "718 718" _null_ circle_distance - _null_ ));
+DATA(insert OID = 1471 ( circle_distance PGNSP PGUID 12 f f t f i 2 701 "718 718" _null_ circle_distance - _null_ ));
DESCR("distance between");
-DATA(insert OID = 1472 ( circle_center PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
+DATA(insert OID = 1472 ( circle_center PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1473 ( circle PGNSP PGUID 12 f f t f i 2 718 "600 701" _null_ cr_circle - _null_ ));
+DATA(insert OID = 1473 ( circle PGNSP PGUID 12 f f t f i 2 718 "600 701" _null_ cr_circle - _null_ ));
DESCR("convert point and radius to circle");
-DATA(insert OID = 1474 ( circle PGNSP PGUID 12 f f t f i 1 718 "604" _null_ poly_circle - _null_ ));
+DATA(insert OID = 1474 ( circle PGNSP PGUID 12 f f t f i 1 718 "604" _null_ poly_circle - _null_ ));
DESCR("convert polygon to circle");
-DATA(insert OID = 1475 ( polygon PGNSP PGUID 12 f f t f i 2 604 "23 718" _null_ circle_poly - _null_ ));
+DATA(insert OID = 1475 ( polygon PGNSP PGUID 12 f f t f i 2 604 "23 718" _null_ circle_poly - _null_ ));
DESCR("convert vertex count and circle to polygon");
-DATA(insert OID = 1476 ( dist_pc PGNSP PGUID 12 f f t f i 2 701 "600 718" _null_ dist_pc - _null_ ));
+DATA(insert OID = 1476 ( dist_pc PGNSP PGUID 12 f f t f i 2 701 "600 718" _null_ dist_pc - _null_ ));
DESCR("distance between point and circle");
DATA(insert OID = 1477 ( circle_contain_pt PGNSP PGUID 12 f f t f i 2 16 "718 600" _null_ circle_contain_pt - _null_ ));
DESCR("circle contains point?");
DATA(insert OID = 1478 ( pt_contained_circle PGNSP PGUID 12 f f t f i 2 16 "600 718" _null_ pt_contained_circle - _null_ ));
DESCR("point inside circle?");
-DATA(insert OID = 1479 ( circle PGNSP PGUID 12 f f t f i 1 718 "603" _null_ box_circle - _null_ ));
+DATA(insert OID = 1479 ( circle PGNSP PGUID 12 f f t f i 1 718 "603" _null_ box_circle - _null_ ));
DESCR("convert box to circle");
-DATA(insert OID = 1480 ( box PGNSP PGUID 12 f f t f i 1 603 "718" _null_ circle_box - _null_ ));
+DATA(insert OID = 1480 ( box PGNSP PGUID 12 f f t f i 1 603 "718" _null_ circle_box - _null_ ));
DESCR("convert circle to box");
DATA(insert OID = 1481 ( tinterval PGNSP PGUID 12 f f t f i 2 704 "702 702" _null_ mktinterval - _null_ ));
DESCR("convert to tinterval");
DESCR("greater-than by length");
DATA(insert OID = 1486 ( lseg_ge PGNSP PGUID 12 f f t f i 2 16 "601 601" _null_ lseg_ge - _null_ ));
DESCR("greater-than-or-equal by length");
-DATA(insert OID = 1487 ( lseg_length PGNSP PGUID 12 f f t f i 1 701 "601" _null_ lseg_length - _null_ ));
+DATA(insert OID = 1487 ( lseg_length PGNSP PGUID 12 f f t f i 1 701 "601" _null_ lseg_length - _null_ ));
DESCR("distance between endpoints");
-DATA(insert OID = 1488 ( close_ls PGNSP PGUID 12 f f t f i 2 600 "628 601" _null_ close_ls - _null_ ));
+DATA(insert OID = 1488 ( close_ls PGNSP PGUID 12 f f t f i 2 600 "628 601" _null_ close_ls - _null_ ));
DESCR("closest point to line on line segment");
-DATA(insert OID = 1489 ( close_lseg PGNSP PGUID 12 f f t f i 2 600 "601 601" _null_ close_lseg - _null_ ));
+DATA(insert OID = 1489 ( close_lseg PGNSP PGUID 12 f f t f i 2 600 "601 601" _null_ close_lseg - _null_ ));
DESCR("closest point to line segment on line segment");
DATA(insert OID = 1490 ( line_in PGNSP PGUID 12 f f t f i 1 628 "2275" _null_ line_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1491 ( line_out PGNSP PGUID 12 f f t f i 1 2275 "628" _null_ line_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 1492 ( line_eq PGNSP PGUID 12 f f t f i 2 16 "628 628" _null_ line_eq - _null_ ));
+DATA(insert OID = 1492 ( line_eq PGNSP PGUID 12 f f t f i 2 16 "628 628" _null_ line_eq - _null_ ));
DESCR("lines equal?");
-DATA(insert OID = 1493 ( line PGNSP PGUID 12 f f t f i 2 628 "600 600" _null_ line_construct_pp - _null_ ));
+DATA(insert OID = 1493 ( line PGNSP PGUID 12 f f t f i 2 628 "600 600" _null_ line_construct_pp - _null_ ));
DESCR("line from points");
-DATA(insert OID = 1494 ( line_interpt PGNSP PGUID 12 f f t f i 2 600 "628 628" _null_ line_interpt - _null_ ));
+DATA(insert OID = 1494 ( line_interpt PGNSP PGUID 12 f f t f i 2 600 "628 628" _null_ line_interpt - _null_ ));
DESCR("intersection point");
DATA(insert OID = 1495 ( line_intersect PGNSP PGUID 12 f f t f i 2 16 "628 628" _null_ line_intersect - _null_ ));
DESCR("intersect?");
/* OIDS 1500 - 1599 */
-DATA(insert OID = 1530 ( length PGNSP PGUID 12 f f t f i 1 701 "601" _null_ lseg_length - _null_ ));
+DATA(insert OID = 1530 ( length PGNSP PGUID 12 f f t f i 1 701 "601" _null_ lseg_length - _null_ ));
DESCR("distance between endpoints");
-DATA(insert OID = 1531 ( length PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_length - _null_ ));
+DATA(insert OID = 1531 ( length PGNSP PGUID 12 f f t f i 1 701 "602" _null_ path_length - _null_ ));
DESCR("sum of path segments");
-DATA(insert OID = 1532 ( point PGNSP PGUID 12 f f t f i 1 600 "601" _null_ lseg_center - _null_ ));
+DATA(insert OID = 1532 ( point PGNSP PGUID 12 f f t f i 1 600 "601" _null_ lseg_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1533 ( point PGNSP PGUID 12 f f t f i 1 600 "602" _null_ path_center - _null_ ));
+DATA(insert OID = 1533 ( point PGNSP PGUID 12 f f t f i 1 600 "602" _null_ path_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1534 ( point PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
+DATA(insert OID = 1534 ( point PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1540 ( point PGNSP PGUID 12 f f t f i 1 600 "604" _null_ poly_center - _null_ ));
+DATA(insert OID = 1540 ( point PGNSP PGUID 12 f f t f i 1 600 "604" _null_ poly_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1541 ( lseg PGNSP PGUID 12 f f t f i 1 601 "603" _null_ box_diagonal - _null_ ));
+DATA(insert OID = 1541 ( lseg PGNSP PGUID 12 f f t f i 1 601 "603" _null_ box_diagonal - _null_ ));
DESCR("diagonal of");
-DATA(insert OID = 1542 ( center PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
+DATA(insert OID = 1542 ( center PGNSP PGUID 12 f f t f i 1 600 "603" _null_ box_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1543 ( center PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
+DATA(insert OID = 1543 ( center PGNSP PGUID 12 f f t f i 1 600 "718" _null_ circle_center - _null_ ));
DESCR("center of");
-DATA(insert OID = 1544 ( polygon PGNSP PGUID 14 f f t f i 1 604 "718" _null_ "select pg_catalog.polygon(12, $1)" - _null_ ));
+DATA(insert OID = 1544 ( polygon PGNSP PGUID 14 f f t f i 1 604 "718" _null_ "select pg_catalog.polygon(12, $1)" - _null_ ));
DESCR("convert circle to 12-vertex polygon");
DATA(insert OID = 1545 ( npoints PGNSP PGUID 12 f f t f i 1 23 "602" _null_ path_npoints - _null_ ));
DESCR("number of points in path");
DATA(insert OID = 1556 ( npoints PGNSP PGUID 12 f f t f i 1 23 "604" _null_ poly_npoints - _null_ ));
DESCR("number of points in polygon");
-DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 f f t f i 3 1560 "2275 26 23" _null_ bit_in - _null_ ));
+DATA(insert OID = 1564 ( bit_in PGNSP PGUID 12 f f t f i 3 1560 "2275 26 23" _null_ bit_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1565 ( bit_out PGNSP PGUID 12 f f t f i 1 2275 "1560" _null_ bit_out - _null_ ));
DESCR("I/O");
DESCR("sequence current value");
DATA(insert OID = 1576 ( setval PGNSP PGUID 12 f f t f v 2 20 "25 20" _null_ setval - _null_ ));
DESCR("set sequence value");
-DATA(insert OID = 1765 ( setval PGNSP PGUID 12 f f t f v 3 20 "25 20 16" _null_ setval_and_iscalled - _null_ ));
+DATA(insert OID = 1765 ( setval PGNSP PGUID 12 f f t f v 3 20 "25 20 16" _null_ setval_and_iscalled - _null_ ));
DESCR("set sequence value and iscalled status");
-DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 f f t f i 3 1562 "2275 26 23" _null_ varbit_in - _null_ ));
+DATA(insert OID = 1579 ( varbit_in PGNSP PGUID 12 f f t f i 3 1562 "2275 26 23" _null_ varbit_in - _null_ ));
DESCR("I/O");
DATA(insert OID = 1580 ( varbit_out PGNSP PGUID 12 f f t f i 1 2275 "1562" _null_ varbit_out - _null_ ));
DESCR("I/O");
DATA(insert OID = 1598 ( random PGNSP PGUID 12 f f t f v 0 701 "" _null_ drandom - _null_ ));
DESCR("random value");
-DATA(insert OID = 1599 ( setseed PGNSP PGUID 12 f f t f v 1 23 "701" _null_ setseed - _null_ ));
+DATA(insert OID = 1599 ( setseed PGNSP PGUID 12 f f t f v 1 23 "701" _null_ setseed - _null_ ));
DESCR("set random seed");
/* OIDS 1600 - 1699 */
-DATA(insert OID = 1600 ( asin PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dasin - _null_ ));
+DATA(insert OID = 1600 ( asin PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dasin - _null_ ));
DESCR("arcsine");
-DATA(insert OID = 1601 ( acos PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dacos - _null_ ));
+DATA(insert OID = 1601 ( acos PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dacos - _null_ ));
DESCR("arccosine");
-DATA(insert OID = 1602 ( atan PGNSP PGUID 12 f f t f i 1 701 "701" _null_ datan - _null_ ));
+DATA(insert OID = 1602 ( atan PGNSP PGUID 12 f f t f i 1 701 "701" _null_ datan - _null_ ));
DESCR("arctangent");
-DATA(insert OID = 1603 ( atan2 PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ datan2 - _null_ ));
+DATA(insert OID = 1603 ( atan2 PGNSP PGUID 12 f f t f i 2 701 "701 701" _null_ datan2 - _null_ ));
DESCR("arctangent, two arguments");
-DATA(insert OID = 1604 ( sin PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsin - _null_ ));
+DATA(insert OID = 1604 ( sin PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dsin - _null_ ));
DESCR("sine");
-DATA(insert OID = 1605 ( cos PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcos - _null_ ));
+DATA(insert OID = 1605 ( cos PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcos - _null_ ));
DESCR("cosine");
-DATA(insert OID = 1606 ( tan PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dtan - _null_ ));
+DATA(insert OID = 1606 ( tan PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dtan - _null_ ));
DESCR("tangent");
-DATA(insert OID = 1607 ( cot PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcot - _null_ ));
+DATA(insert OID = 1607 ( cot PGNSP PGUID 12 f f t f i 1 701 "701" _null_ dcot - _null_ ));
DESCR("cotangent");
-DATA(insert OID = 1608 ( degrees PGNSP PGUID 12 f f t f i 1 701 "701" _null_ degrees - _null_ ));
+DATA(insert OID = 1608 ( degrees PGNSP PGUID 12 f f t f i 1 701 "701" _null_ degrees - _null_ ));
DESCR("radians to degrees");
-DATA(insert OID = 1609 ( radians PGNSP PGUID 12 f f t f i 1 701 "701" _null_ radians - _null_ ));
+DATA(insert OID = 1609 ( radians PGNSP PGUID 12 f f t f i 1 701 "701" _null_ radians - _null_ ));
DESCR("degrees to radians");
DATA(insert OID = 1610 ( pi PGNSP PGUID 12 f f t f i 0 701 "" _null_ dpi - _null_ ));
DESCR("PI");
DATA(insert OID = 1637 ( like_escape PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ like_escape - _null_ ));
DESCR("convert LIKE pattern to use backslash escapes");
-DATA(insert OID = 1656 ( bpcharicregexeq PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ texticregexeq - _null_ ));
+DATA(insert OID = 1656 ( bpcharicregexeq PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ texticregexeq - _null_ ));
DESCR("matches regex., case-insensitive");
-DATA(insert OID = 1657 ( bpcharicregexne PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ texticregexne - _null_ ));
+DATA(insert OID = 1657 ( bpcharicregexne PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ texticregexne - _null_ ));
DESCR("does not match regex., case-insensitive");
DATA(insert OID = 1658 ( bpcharregexeq PGNSP PGUID 12 f f t f i 2 16 "1042 25" _null_ textregexeq - _null_ ));
DESCR("matches regex., case-sensitive");
DESCR("update pg_pwd and pg_group files");
/* Oracle Compatibility Related Functions - By Edmund Mergl */
-DATA(insert OID = 868 ( strpos PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ textpos - _null_ ));
+DATA(insert OID = 868 ( strpos PGNSP PGUID 12 f f t f i 2 23 "25 25" _null_ textpos - _null_ ));
DESCR("find position of substring");
DATA(insert OID = 870 ( lower PGNSP PGUID 12 f f t f i 1 25 "25" _null_ lower - _null_ ));
DESCR("lowercase");
DESCR("uppercase");
DATA(insert OID = 872 ( initcap PGNSP PGUID 12 f f t f i 1 25 "25" _null_ initcap - _null_ ));
DESCR("capitalize each word");
-DATA(insert OID = 873 ( lpad PGNSP PGUID 12 f f t f i 3 25 "25 23 25" _null_ lpad - _null_ ));
+DATA(insert OID = 873 ( lpad PGNSP PGUID 12 f f t f i 3 25 "25 23 25" _null_ lpad - _null_ ));
DESCR("left-pad string to length");
-DATA(insert OID = 874 ( rpad PGNSP PGUID 12 f f t f i 3 25 "25 23 25" _null_ rpad - _null_ ));
+DATA(insert OID = 874 ( rpad PGNSP PGUID 12 f f t f i 3 25 "25 23 25" _null_ rpad - _null_ ));
DESCR("right-pad string to length");
-DATA(insert OID = 875 ( ltrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ ltrim - _null_ ));
+DATA(insert OID = 875 ( ltrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ ltrim - _null_ ));
DESCR("trim selected characters from left end of string");
-DATA(insert OID = 876 ( rtrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ rtrim - _null_ ));
+DATA(insert OID = 876 ( rtrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ rtrim - _null_ ));
DESCR("trim selected characters from right end of string");
-DATA(insert OID = 877 ( substr PGNSP PGUID 12 f f t f i 3 25 "25 23 23" _null_ text_substr - _null_ ));
+DATA(insert OID = 877 ( substr PGNSP PGUID 12 f f t f i 3 25 "25 23 23" _null_ text_substr - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 878 ( translate PGNSP PGUID 12 f f t f i 3 25 "25 25 25" _null_ translate - _null_ ));
+DATA(insert OID = 878 ( translate PGNSP PGUID 12 f f t f i 3 25 "25 25 25" _null_ translate - _null_ ));
DESCR("map a set of character appearing in string");
-DATA(insert OID = 879 ( lpad PGNSP PGUID 14 f f t f i 2 25 "25 23" _null_ "select pg_catalog.lpad($1, $2, \' \')" - _null_ ));
+DATA(insert OID = 879 ( lpad PGNSP PGUID 14 f f t f i 2 25 "25 23" _null_ "select pg_catalog.lpad($1, $2, \' \')" - _null_ ));
DESCR("left-pad string to length");
-DATA(insert OID = 880 ( rpad PGNSP PGUID 14 f f t f i 2 25 "25 23" _null_ "select pg_catalog.rpad($1, $2, \' \')" - _null_ ));
+DATA(insert OID = 880 ( rpad PGNSP PGUID 14 f f t f i 2 25 "25 23" _null_ "select pg_catalog.rpad($1, $2, \' \')" - _null_ ));
DESCR("right-pad string to length");
DATA(insert OID = 881 ( ltrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ ltrim1 - _null_ ));
DESCR("trim spaces from left end of string");
DATA(insert OID = 882 ( rtrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ rtrim1 - _null_ ));
DESCR("trim spaces from right end of string");
-DATA(insert OID = 883 ( substr PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ text_substr_no_len - _null_ ));
+DATA(insert OID = 883 ( substr PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ text_substr_no_len - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 884 ( btrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ btrim - _null_ ));
+DATA(insert OID = 884 ( btrim PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ btrim - _null_ ));
DESCR("trim selected characters from both ends of string");
DATA(insert OID = 885 ( btrim PGNSP PGUID 12 f f t f i 1 25 "25" _null_ btrim1 - _null_ ));
DESCR("trim spaces from both ends of string");
-DATA(insert OID = 936 ( substring PGNSP PGUID 12 f f t f i 3 25 "25 23 23" _null_ text_substr - _null_ ));
+DATA(insert OID = 936 ( substring PGNSP PGUID 12 f f t f i 3 25 "25 23 23" _null_ text_substr - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 937 ( substring PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ text_substr_no_len - _null_ ));
+DATA(insert OID = 937 ( substring PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ text_substr_no_len - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 2087 ( replace PGNSP PGUID 12 f f t f i 3 25 "25 25 25" _null_ replace_text - _null_ ));
+DATA(insert OID = 2087 ( replace PGNSP PGUID 12 f f t f i 3 25 "25 25 25" _null_ replace_text - _null_ ));
DESCR("replace all occurrences of old_substr with new_substr in string");
-DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 f f t f i 3 25 "25 25 23" _null_ split_text - _null_ ));
+DATA(insert OID = 2088 ( split_part PGNSP PGUID 12 f f t f i 3 25 "25 25 23" _null_ split_text - _null_ ));
DESCR("split string by field_sep and return field_num");
DATA(insert OID = 2089 ( to_hex PGNSP PGUID 12 f f t f i 1 25 "23" _null_ to_hex32 - _null_ ));
DESCR("convert int4 number to hex");
DATA(insert OID = 810 ( pg_client_encoding PGNSP PGUID 12 f f t f s 0 19 "" _null_ pg_client_encoding - _null_ ));
DESCR("encoding name of current database");
-DATA(insert OID = 1717 ( convert PGNSP PGUID 12 f f t f s 2 25 "25 19" _null_ pg_convert - _null_ ));
+DATA(insert OID = 1717 ( convert PGNSP PGUID 12 f f t f s 2 25 "25 19" _null_ pg_convert - _null_ ));
DESCR("convert string with specified destination encoding name");
-DATA(insert OID = 1813 ( convert PGNSP PGUID 12 f f t f s 3 25 "25 19 19" _null_ pg_convert2 - _null_ ));
+DATA(insert OID = 1813 ( convert PGNSP PGUID 12 f f t f s 3 25 "25 19 19" _null_ pg_convert2 - _null_ ));
DESCR("convert string with specified encoding names");
DATA(insert OID = 1619 ( convert_using PGNSP PGUID 12 f f t f s 2 25 "25 25" _null_ pg_convert_using - _null_ ));
DATA(insert OID = 1597 ( pg_encoding_to_char PGNSP PGUID 12 f f t f s 1 19 "23" _null_ PG_encoding_to_char - _null_ ));
DESCR("convert encoding id to encoding name");
-DATA(insert OID = 1638 ( oidgt PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidgt - _null_ ));
+DATA(insert OID = 1638 ( oidgt PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidgt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1639 ( oidge PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidge - _null_ ));
+DATA(insert OID = 1639 ( oidge PGNSP PGUID 12 f f t f i 2 16 "26 26" _null_ oidge - _null_ ));
DESCR("greater-than-or-equal");
/* System-view support functions */
DESCR("trigger description");
DATA(insert OID = 1387 ( pg_get_constraintdef PGNSP PGUID 12 f f t f s 1 25 "26" _null_ pg_get_constraintdef - _null_ ));
DESCR("constraint description");
-DATA(insert OID = 1716 ( pg_get_expr PGNSP PGUID 12 f f t f s 2 25 "25 26" _null_ pg_get_expr - _null_ ));
+DATA(insert OID = 1716 ( pg_get_expr PGNSP PGUID 12 f f t f s 2 25 "25 26" _null_ pg_get_expr - _null_ ));
DESCR("deparse an encoded expression");
DATA(insert OID = 1665 ( pg_get_serial_sequence PGNSP PGUID 12 f f t f s 2 25 "25 25" _null_ pg_get_serial_sequence - _null_ ));
DESCR("name of sequence for a serial column");
DATA(insert OID = 1672 ( varbitcmp PGNSP PGUID 12 f f t f i 2 23 "1562 1562" _null_ bitcmp - _null_ ));
DESCR("compare");
-DATA(insert OID = 1673 ( bitand PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitand - _null_ ));
+DATA(insert OID = 1673 ( bitand PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitand - _null_ ));
DESCR("bitwise and");
-DATA(insert OID = 1674 ( bitor PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitor - _null_ ));
+DATA(insert OID = 1674 ( bitor PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitor - _null_ ));
DESCR("bitwise or");
-DATA(insert OID = 1675 ( bitxor PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitxor - _null_ ));
+DATA(insert OID = 1675 ( bitxor PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitxor - _null_ ));
DESCR("bitwise exclusive or");
DATA(insert OID = 1676 ( bitnot PGNSP PGUID 12 f f t f i 1 1560 "1560" _null_ bitnot - _null_ ));
DESCR("bitwise negation");
DESCR("bitwise left shift");
DATA(insert OID = 1678 ( bitshiftright PGNSP PGUID 12 f f t f i 2 1560 "1560 23" _null_ bitshiftright - _null_ ));
DESCR("bitwise right shift");
-DATA(insert OID = 1679 ( bitcat PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitcat - _null_ ));
+DATA(insert OID = 1679 ( bitcat PGNSP PGUID 12 f f t f i 2 1560 "1560 1560" _null_ bitcat - _null_ ));
DESCR("bitwise concatenation");
-DATA(insert OID = 1680 ( substring PGNSP PGUID 12 f f t f i 3 1560 "1560 23 23" _null_ bitsubstr - _null_ ));
+DATA(insert OID = 1680 ( substring PGNSP PGUID 12 f f t f i 3 1560 "1560 23 23" _null_ bitsubstr - _null_ ));
DESCR("return portion of bitstring");
-DATA(insert OID = 1681 ( length PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bitlength - _null_ ));
+DATA(insert OID = 1681 ( length PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bitlength - _null_ ));
DESCR("bitstring length");
-DATA(insert OID = 1682 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bitoctetlength - _null_ ));
+DATA(insert OID = 1682 ( octet_length PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bitoctetlength - _null_ ));
DESCR("octet length");
DATA(insert OID = 1683 ( bit PGNSP PGUID 12 f f t f i 2 1560 "23 23" _null_ bitfromint4 - _null_ ));
DESCR("int4 to bitstring");
-DATA(insert OID = 1684 ( int4 PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bittoint4 - _null_ ));
+DATA(insert OID = 1684 ( int4 PGNSP PGUID 12 f f t f i 1 23 "1560" _null_ bittoint4 - _null_ ));
DESCR("bitstring to int4");
DATA(insert OID = 1685 ( bit PGNSP PGUID 12 f f t f i 3 1560 "1560 23 16" _null_ bit - _null_ ));
DATA(insert OID = 437 ( macaddr_out PGNSP PGUID 12 f f t f i 1 2275 "829" _null_ macaddr_out - _null_ ));
DESCR("I/O");
-DATA(insert OID = 752 ( text PGNSP PGUID 12 f f t f i 1 25 "829" _null_ macaddr_text - _null_ ));
+DATA(insert OID = 752 ( text PGNSP PGUID 12 f f t f i 1 25 "829" _null_ macaddr_text - _null_ ));
DESCR("MAC address to text");
-DATA(insert OID = 753 ( trunc PGNSP PGUID 12 f f t f i 1 829 "829" _null_ macaddr_trunc - _null_ ));
+DATA(insert OID = 753 ( trunc PGNSP PGUID 12 f f t f i 1 829 "829" _null_ macaddr_trunc - _null_ ));
DESCR("MAC manufacturer fields");
-DATA(insert OID = 767 ( macaddr PGNSP PGUID 12 f f t f i 1 829 "25" _null_ text_macaddr - _null_ ));
+DATA(insert OID = 767 ( macaddr PGNSP PGUID 12 f f t f i 1 829 "25" _null_ text_macaddr - _null_ ));
DESCR("text to MAC address");
-DATA(insert OID = 830 ( macaddr_eq PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_eq - _null_ ));
+DATA(insert OID = 830 ( macaddr_eq PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 831 ( macaddr_lt PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_lt - _null_ ));
+DATA(insert OID = 831 ( macaddr_lt PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 832 ( macaddr_le PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_le - _null_ ));
+DATA(insert OID = 832 ( macaddr_le PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 833 ( macaddr_gt PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_gt - _null_ ));
+DATA(insert OID = 833 ( macaddr_gt PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 834 ( macaddr_ge PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_ge - _null_ ));
+DATA(insert OID = 834 ( macaddr_ge PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 835 ( macaddr_ne PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_ne - _null_ ));
+DATA(insert OID = 835 ( macaddr_ne PGNSP PGUID 12 f f t f i 2 16 "829 829" _null_ macaddr_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 836 ( macaddr_cmp PGNSP PGUID 12 f f t f i 2 23 "829 829" _null_ macaddr_cmp - _null_ ));
+DATA(insert OID = 836 ( macaddr_cmp PGNSP PGUID 12 f f t f i 2 23 "829 829" _null_ macaddr_cmp - _null_ ));
DESCR("less-equal-greater");
/* for inet type support */
DESCR("I/O");
/* these are used for both inet and cidr */
-DATA(insert OID = 920 ( network_eq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_eq - _null_ ));
+DATA(insert OID = 920 ( network_eq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 921 ( network_lt PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_lt - _null_ ));
+DATA(insert OID = 921 ( network_lt PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 922 ( network_le PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_le - _null_ ));
+DATA(insert OID = 922 ( network_le PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 923 ( network_gt PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_gt - _null_ ));
+DATA(insert OID = 923 ( network_gt PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 924 ( network_ge PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_ge - _null_ ));
+DATA(insert OID = 924 ( network_ge PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 925 ( network_ne PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_ne - _null_ ));
+DATA(insert OID = 925 ( network_ne PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 926 ( network_cmp PGNSP PGUID 12 f f t f i 2 23 "869 869" _null_ network_cmp - _null_ ));
+DATA(insert OID = 926 ( network_cmp PGNSP PGUID 12 f f t f i 2 23 "869 869" _null_ network_cmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 927 ( network_sub PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_sub - _null_ ));
+DATA(insert OID = 927 ( network_sub PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_sub - _null_ ));
DESCR("is-subnet");
-DATA(insert OID = 928 ( network_subeq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_subeq - _null_ ));
+DATA(insert OID = 928 ( network_subeq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_subeq - _null_ ));
DESCR("is-subnet-or-equal");
-DATA(insert OID = 929 ( network_sup PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_sup - _null_ ));
+DATA(insert OID = 929 ( network_sup PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_sup - _null_ ));
DESCR("is-supernet");
-DATA(insert OID = 930 ( network_supeq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_supeq - _null_ ));
+DATA(insert OID = 930 ( network_supeq PGNSP PGUID 12 f f t f i 2 16 "869 869" _null_ network_supeq - _null_ ));
DESCR("is-supernet-or-equal");
/* inet/cidr functions */
-DATA(insert OID = 605 ( abbrev PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_abbrev - _null_ ));
+DATA(insert OID = 605 ( abbrev PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_abbrev - _null_ ));
DESCR("abbreviated display of inet/cidr value");
-DATA(insert OID = 711 ( family PGNSP PGUID 12 f f t f i 1 23 "869" _null_ network_family - _null_ ));
+DATA(insert OID = 711 ( family PGNSP PGUID 12 f f t f i 1 23 "869" _null_ network_family - _null_ ));
DESCR("return address family (4 for IPv4, 6 for IPv6)");
-DATA(insert OID = 683 ( network PGNSP PGUID 12 f f t f i 1 650 "869" _null_ network_network - _null_ ));
+DATA(insert OID = 683 ( network PGNSP PGUID 12 f f t f i 1 650 "869" _null_ network_network - _null_ ));
DESCR("network part of address");
-DATA(insert OID = 696 ( netmask PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_netmask - _null_ ));
+DATA(insert OID = 696 ( netmask PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_netmask - _null_ ));
DESCR("netmask of address");
-DATA(insert OID = 697 ( masklen PGNSP PGUID 12 f f t f i 1 23 "869" _null_ network_masklen - _null_ ));
+DATA(insert OID = 697 ( masklen PGNSP PGUID 12 f f t f i 1 23 "869" _null_ network_masklen - _null_ ));
DESCR("netmask length");
-DATA(insert OID = 698 ( broadcast PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_broadcast - _null_ ));
+DATA(insert OID = 698 ( broadcast PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_broadcast - _null_ ));
DESCR("broadcast address of network");
-DATA(insert OID = 699 ( host PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_host - _null_ ));
+DATA(insert OID = 699 ( host PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_host - _null_ ));
DESCR("show address octets only");
-DATA(insert OID = 730 ( text PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_show - _null_ ));
+DATA(insert OID = 730 ( text PGNSP PGUID 12 f f t f i 1 25 "869" _null_ network_show - _null_ ));
DESCR("show all parts of inet/cidr value");
-DATA(insert OID = 1362 ( hostmask PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_hostmask - _null_ ));
+DATA(insert OID = 1362 ( hostmask PGNSP PGUID 12 f f t f i 1 869 "869" _null_ network_hostmask - _null_ ));
DESCR("hostmask of address");
-DATA(insert OID = 1713 ( inet PGNSP PGUID 12 f f t f i 1 869 "25" _null_ text_inet - _null_ ));
+DATA(insert OID = 1713 ( inet PGNSP PGUID 12 f f t f i 1 869 "25" _null_ text_inet - _null_ ));
DESCR("text to inet");
-DATA(insert OID = 1714 ( cidr PGNSP PGUID 12 f f t f i 1 650 "25" _null_ text_cidr - _null_ ));
+DATA(insert OID = 1714 ( cidr PGNSP PGUID 12 f f t f i 1 650 "25" _null_ text_cidr - _null_ ));
DESCR("text to cidr");
-DATA(insert OID = 1715 ( set_masklen PGNSP PGUID 12 f f t f i 2 869 "869 23" _null_ inet_set_masklen - _null_ ));
+DATA(insert OID = 1715 ( set_masklen PGNSP PGUID 12 f f t f i 2 869 "869 23" _null_ inet_set_masklen - _null_ ));
DESCR("change the netmask of an inet");
DATA(insert OID = 2196 ( inet_client_addr PGNSP PGUID 12 f f f f s 0 869 "" _null_ inet_client_addr - _null_ ));
DATA(insert OID = 2199 ( inet_server_port PGNSP PGUID 12 f f f f s 0 23 "" _null_ inet_server_port - _null_ ));
DESCR("server's port number for this connection");
-DATA(insert OID = 1686 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "25" _null_ text_numeric - _null_ ));
+DATA(insert OID = 1686 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "25" _null_ text_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1688 ( text PGNSP PGUID 12 f f t f i 1 25 "1700" _null_ numeric_text - _null_ ));
+DATA(insert OID = 1688 ( text PGNSP PGUID 12 f f t f i 1 25 "1700" _null_ numeric_text - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 f f t f i 2 1186 "1083 1083" _null_ time_mi_time - _null_ ));
+DATA(insert OID = 1690 ( time_mi_time PGNSP PGUID 12 f f t f i 2 1186 "1083 1083" _null_ time_mi_time - _null_ ));
DESCR("minus");
DATA(insert OID = 1691 ( boolle PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolle - _null_ ));
DATA(insert OID = 1693 ( btboolcmp PGNSP PGUID 12 f f t f i 2 23 "16 16" _null_ btboolcmp - _null_ ));
DESCR("btree less-equal-greater");
-DATA(insert OID = 1696 ( timetz_hash PGNSP PGUID 12 f f t f i 1 23 "1266" _null_ timetz_hash - _null_ ));
+DATA(insert OID = 1696 ( timetz_hash PGNSP PGUID 12 f f t f i 1 23 "1266" _null_ timetz_hash - _null_ ));
DESCR("hash");
-DATA(insert OID = 1697 ( interval_hash PGNSP PGUID 12 f f t f i 1 23 "1186" _null_ interval_hash - _null_ ));
+DATA(insert OID = 1697 ( interval_hash PGNSP PGUID 12 f f t f i 1 23 "1186" _null_ interval_hash - _null_ ));
DESCR("hash");
DESCR("less-than");
DATA(insert OID = 1723 ( numeric_le PGNSP PGUID 12 f f t f i 2 16 "1700 1700" _null_ numeric_le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1724 ( numeric_add PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_add - _null_ ));
+DATA(insert OID = 1724 ( numeric_add PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_add - _null_ ));
DESCR("add");
-DATA(insert OID = 1725 ( numeric_sub PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_sub - _null_ ));
+DATA(insert OID = 1725 ( numeric_sub PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_sub - _null_ ));
DESCR("subtract");
-DATA(insert OID = 1726 ( numeric_mul PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mul - _null_ ));
+DATA(insert OID = 1726 ( numeric_mul PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mul - _null_ ));
DESCR("multiply");
-DATA(insert OID = 1727 ( numeric_div PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_div - _null_ ));
+DATA(insert OID = 1727 ( numeric_div PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_div - _null_ ));
DESCR("divide");
-DATA(insert OID = 1728 ( mod PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mod - _null_ ));
+DATA(insert OID = 1728 ( mod PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mod - _null_ ));
DESCR("modulus");
-DATA(insert OID = 1729 ( numeric_mod PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mod - _null_ ));
+DATA(insert OID = 1729 ( numeric_mod PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_mod - _null_ ));
DESCR("modulus");
DATA(insert OID = 1730 ( sqrt PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ numeric_sqrt - _null_ ));
DESCR("square root");
DESCR("natural logarithm of n");
DATA(insert OID = 1735 ( numeric_ln PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ numeric_ln - _null_ ));
DESCR("natural logarithm of n");
-DATA(insert OID = 1736 ( log PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_log - _null_ ));
+DATA(insert OID = 1736 ( log PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_log - _null_ ));
DESCR("logarithm base m of n");
-DATA(insert OID = 1737 ( numeric_log PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_log - _null_ ));
+DATA(insert OID = 1737 ( numeric_log PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_log - _null_ ));
DESCR("logarithm base m of n");
-DATA(insert OID = 1738 ( pow PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
+DATA(insert OID = 1738 ( pow PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
DESCR("m raised to the power of n");
-DATA(insert OID = 2169 ( power PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
+DATA(insert OID = 2169 ( power PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
DESCR("m raised to the power of n");
-DATA(insert OID = 1739 ( numeric_power PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
+DATA(insert OID = 1739 ( numeric_power PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_power - _null_ ));
DESCR("m raised to the power of n");
-DATA(insert OID = 1740 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "23" _null_ int4_numeric - _null_ ));
+DATA(insert OID = 1740 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "23" _null_ int4_numeric - _null_ ));
DESCR("(internal)");
DATA(insert OID = 1741 ( log PGNSP PGUID 14 f f t f i 1 1700 "1700" _null_ "select pg_catalog.log(10, $1)" - _null_ ));
DESCR("logarithm base 10 of n");
DESCR("(internal)");
DATA(insert OID = 1743 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "701" _null_ float8_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1744 ( int4 PGNSP PGUID 12 f f t f i 1 23 "1700" _null_ numeric_int4 - _null_ ));
+DATA(insert OID = 1744 ( int4 PGNSP PGUID 12 f f t f i 1 23 "1700" _null_ numeric_int4 - _null_ ));
DESCR("(internal)");
DATA(insert OID = 1745 ( float4 PGNSP PGUID 12 f f t f i 1 700 "1700" _null_ numeric_float4 - _null_ ));
DESCR("(internal)");
DATA(insert OID = 2170 ( width_bucket PGNSP PGUID 12 f f t f i 4 23 "1700 1700 1700 23" _null_ width_bucket_numeric - _null_ ));
DESCR("bucket number of operand in equidepth histogram");
-DATA(insert OID = 1747 ( time_pl_interval PGNSP PGUID 12 f f t f i 2 1083 "1083 1186" _null_ time_pl_interval - _null_ ));
+DATA(insert OID = 1747 ( time_pl_interval PGNSP PGUID 12 f f t f i 2 1083 "1083 1186" _null_ time_pl_interval - _null_ ));
DESCR("plus");
-DATA(insert OID = 1748 ( time_mi_interval PGNSP PGUID 12 f f t f i 2 1083 "1083 1186" _null_ time_mi_interval - _null_ ));
+DATA(insert OID = 1748 ( time_mi_interval PGNSP PGUID 12 f f t f i 2 1083 "1083 1186" _null_ time_mi_interval - _null_ ));
DESCR("minus");
-DATA(insert OID = 1749 ( timetz_pl_interval PGNSP PGUID 12 f f t f i 2 1266 "1266 1186" _null_ timetz_pl_interval - _null_ ));
+DATA(insert OID = 1749 ( timetz_pl_interval PGNSP PGUID 12 f f t f i 2 1266 "1266 1186" _null_ timetz_pl_interval - _null_ ));
DESCR("plus");
-DATA(insert OID = 1750 ( timetz_mi_interval PGNSP PGUID 12 f f t f i 2 1266 "1266 1186" _null_ timetz_mi_interval - _null_ ));
+DATA(insert OID = 1750 ( timetz_mi_interval PGNSP PGUID 12 f f t f i 2 1266 "1266 1186" _null_ timetz_mi_interval - _null_ ));
DESCR("minus");
DATA(insert OID = 1764 ( numeric_inc PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ numeric_inc - _null_ ));
DESCR("increment by one");
-DATA(insert OID = 1766 ( numeric_smaller PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_smaller - _null_ ));
+DATA(insert OID = 1766 ( numeric_smaller PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_smaller - _null_ ));
DESCR("smaller of two numbers");
-DATA(insert OID = 1767 ( numeric_larger PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_larger - _null_ ));
+DATA(insert OID = 1767 ( numeric_larger PGNSP PGUID 12 f f t f i 2 1700 "1700 1700" _null_ numeric_larger - _null_ ));
DESCR("larger of two numbers");
DATA(insert OID = 1769 ( numeric_cmp PGNSP PGUID 12 f f t f i 2 23 "1700 1700" _null_ numeric_cmp - _null_ ));
DESCR("compare two numbers");
DATA(insert OID = 1771 ( numeric_uminus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ numeric_uminus - _null_ ));
DESCR("negate");
-DATA(insert OID = 1779 ( int8 PGNSP PGUID 12 f f t f i 1 20 "1700" _null_ numeric_int8 - _null_ ));
+DATA(insert OID = 1779 ( int8 PGNSP PGUID 12 f f t f i 1 20 "1700" _null_ numeric_int8 - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1781 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ int8_numeric - _null_ ));
+DATA(insert OID = 1781 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "20" _null_ int8_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1782 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "21" _null_ int2_numeric - _null_ ));
+DATA(insert OID = 1782 ( numeric PGNSP PGUID 12 f f t f i 1 1700 "21" _null_ int2_numeric - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 1783 ( int2 PGNSP PGUID 12 f f t f i 1 21 "1700" _null_ numeric_int2 - _null_ ));
+DATA(insert OID = 1783 ( int2 PGNSP PGUID 12 f f t f i 1 21 "1700" _null_ numeric_int2 - _null_ ));
DESCR("(internal)");
/* formatting */
DESCR("format int4 to text");
DATA(insert OID = 1774 ( to_char PGNSP PGUID 12 f f t f i 2 25 "20 25" _null_ int8_to_char - _null_ ));
DESCR("format int8 to text");
-DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 f f t f i 2 25 "700 25" _null_ float4_to_char - _null_ ));
+DATA(insert OID = 1775 ( to_char PGNSP PGUID 12 f f t f i 2 25 "700 25" _null_ float4_to_char - _null_ ));
DESCR("format float4 to text");
-DATA(insert OID = 1776 ( to_char PGNSP PGUID 12 f f t f i 2 25 "701 25" _null_ float8_to_char - _null_ ));
+DATA(insert OID = 1776 ( to_char PGNSP PGUID 12 f f t f i 2 25 "701 25" _null_ float8_to_char - _null_ ));
DESCR("format float8 to text");
DATA(insert OID = 1777 ( to_number PGNSP PGUID 12 f f t f i 2 1700 "25 25" _null_ numeric_to_number - _null_ ));
DESCR("convert text to numeric");
DESCR("join selectivity of case-insensitive regex non-match");
/* Aggregate-related functions */
-DATA(insert OID = 1830 ( float8_avg PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_avg - _null_ ));
+DATA(insert OID = 1830 ( float8_avg PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_avg - _null_ ));
DESCR("AVG aggregate final function");
-DATA(insert OID = 1831 ( float8_variance PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_variance - _null_ ));
+DATA(insert OID = 1831 ( float8_variance PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_variance - _null_ ));
DESCR("VARIANCE aggregate final function");
-DATA(insert OID = 1832 ( float8_stddev PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_stddev - _null_ ));
+DATA(insert OID = 1832 ( float8_stddev PGNSP PGUID 12 f f t f i 1 701 "1022" _null_ float8_stddev - _null_ ));
DESCR("STDDEV aggregate final function");
DATA(insert OID = 1833 ( numeric_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 1700" _null_ numeric_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 21" _null_ int2_accum - _null_ ));
+DATA(insert OID = 1834 ( int2_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 21" _null_ int2_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 23" _null_ int4_accum - _null_ ));
+DATA(insert OID = 1835 ( int4_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 23" _null_ int4_accum - _null_ ));
DESCR("aggregate transition function");
-DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 20" _null_ int8_accum - _null_ ));
+DATA(insert OID = 1836 ( int8_accum PGNSP PGUID 12 f f t f i 2 1231 "1231 20" _null_ int8_accum - _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 1837 ( numeric_avg PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ numeric_avg - _null_ ));
DESCR("AVG aggregate final function");
DESCR("VARIANCE aggregate final function");
DATA(insert OID = 1839 ( numeric_stddev PGNSP PGUID 12 f f t f i 1 1700 "1231" _null_ numeric_stddev - _null_ ));
DESCR("STDDEV aggregate final function");
-DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 f f f f i 2 20 "20 21" _null_ int2_sum - _null_ ));
+DATA(insert OID = 1840 ( int2_sum PGNSP PGUID 12 f f f f i 2 20 "20 21" _null_ int2_sum - _null_ ));
DESCR("SUM(int2) transition function");
-DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 f f f f i 2 20 "20 23" _null_ int4_sum - _null_ ));
+DATA(insert OID = 1841 ( int4_sum PGNSP PGUID 12 f f f f i 2 20 "20 23" _null_ int4_sum - _null_ ));
DESCR("SUM(int4) transition function");
-DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 f f f f i 2 1700 "1700 20" _null_ int8_sum - _null_ ));
+DATA(insert OID = 1842 ( int8_sum PGNSP PGUID 12 f f f f i 2 1700 "1700 20" _null_ int8_sum - _null_ ));
DESCR("SUM(int8) transition function");
DATA(insert OID = 1843 ( interval_accum PGNSP PGUID 12 f f t f i 2 1187 "1187 1186" _null_ interval_accum - _null_ ));
DESCR("aggregate transition function");
DATA(insert OID = 1844 ( interval_avg PGNSP PGUID 12 f f t f i 1 1186 "1187" _null_ interval_avg - _null_ ));
DESCR("AVG aggregate final function");
-DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 21" _null_ int2_avg_accum - _null_ ));
+DATA(insert OID = 1962 ( int2_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 21" _null_ int2_avg_accum - _null_ ));
DESCR("AVG(int2) transition function");
-DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 23" _null_ int4_avg_accum - _null_ ));
+DATA(insert OID = 1963 ( int4_avg_accum PGNSP PGUID 12 f f t f i 2 1016 "1016 23" _null_ int4_avg_accum - _null_ ));
DESCR("AVG(int4) transition function");
DATA(insert OID = 1964 ( int8_avg PGNSP PGUID 12 f f t f i 1 1700 "1016" _null_ int8_avg - _null_ ));
DESCR("AVG(int) aggregate final function");
/* To ASCII conversion */
-DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 f f t f i 1 25 "25" _null_ to_ascii_default - _null_ ));
+DATA(insert OID = 1845 ( to_ascii PGNSP PGUID 12 f f t f i 1 25 "25" _null_ to_ascii_default - _null_ ));
DESCR("encode text from DB encoding to ASCII text");
DATA(insert OID = 1846 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 23" _null_ to_ascii_enc - _null_ ));
DESCR("encode text from encoding to ASCII text");
DATA(insert OID = 1847 ( to_ascii PGNSP PGUID 12 f f t f i 2 25 "25 19" _null_ to_ascii_encname - _null_ ));
DESCR("encode text from encoding to ASCII text");
-DATA(insert OID = 1848 ( interval_pl_time PGNSP PGUID 14 f f t f i 2 1083 "1186 1083" _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 1848 ( interval_pl_time PGNSP PGUID 14 f f t f i 2 1083 "1186 1083" _null_ "select $2 + $1" - _null_ ));
DESCR("plus");
-DATA(insert OID = 1850 ( int28eq PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28eq - _null_ ));
+DATA(insert OID = 1850 ( int28eq PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1851 ( int28ne PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28ne - _null_ ));
+DATA(insert OID = 1851 ( int28ne PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1852 ( int28lt PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28lt - _null_ ));
+DATA(insert OID = 1852 ( int28lt PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1853 ( int28gt PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28gt - _null_ ));
+DATA(insert OID = 1853 ( int28gt PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1854 ( int28le PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28le - _null_ ));
+DATA(insert OID = 1854 ( int28le PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1855 ( int28ge PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28ge - _null_ ));
+DATA(insert OID = 1855 ( int28ge PGNSP PGUID 12 f f t f i 2 16 "21 20" _null_ int28ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1856 ( int82eq PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82eq - _null_ ));
+DATA(insert OID = 1856 ( int82eq PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82eq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1857 ( int82ne PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82ne - _null_ ));
+DATA(insert OID = 1857 ( int82ne PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82ne - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1858 ( int82lt PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82lt - _null_ ));
+DATA(insert OID = 1858 ( int82lt PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82lt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1859 ( int82gt PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82gt - _null_ ));
+DATA(insert OID = 1859 ( int82gt PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1860 ( int82le PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82le - _null_ ));
+DATA(insert OID = 1860 ( int82le PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82le - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1861 ( int82ge PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82ge - _null_ ));
+DATA(insert OID = 1861 ( int82ge PGNSP PGUID 12 f f t f i 2 16 "20 21" _null_ int82ge - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1892 ( int2and PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2and - _null_ ));
+DATA(insert OID = 1892 ( int2and PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2and - _null_ ));
DESCR("binary and");
-DATA(insert OID = 1893 ( int2or PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2or - _null_ ));
+DATA(insert OID = 1893 ( int2or PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2or - _null_ ));
DESCR("binary or");
-DATA(insert OID = 1894 ( int2xor PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2xor - _null_ ));
+DATA(insert OID = 1894 ( int2xor PGNSP PGUID 12 f f t f i 2 21 "21 21" _null_ int2xor - _null_ ));
DESCR("binary xor");
DATA(insert OID = 1895 ( int2not PGNSP PGUID 12 f f t f i 1 21 "21" _null_ int2not - _null_ ));
DESCR("binary not");
-DATA(insert OID = 1896 ( int2shl PGNSP PGUID 12 f f t f i 2 21 "21 23" _null_ int2shl - _null_ ));
+DATA(insert OID = 1896 ( int2shl PGNSP PGUID 12 f f t f i 2 21 "21 23" _null_ int2shl - _null_ ));
DESCR("binary shift left");
-DATA(insert OID = 1897 ( int2shr PGNSP PGUID 12 f f t f i 2 21 "21 23" _null_ int2shr - _null_ ));
+DATA(insert OID = 1897 ( int2shr PGNSP PGUID 12 f f t f i 2 21 "21 23" _null_ int2shr - _null_ ));
DESCR("binary shift right");
-DATA(insert OID = 1898 ( int4and PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4and - _null_ ));
+DATA(insert OID = 1898 ( int4and PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4and - _null_ ));
DESCR("binary and");
-DATA(insert OID = 1899 ( int4or PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4or - _null_ ));
+DATA(insert OID = 1899 ( int4or PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4or - _null_ ));
DESCR("binary or");
-DATA(insert OID = 1900 ( int4xor PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4xor - _null_ ));
+DATA(insert OID = 1900 ( int4xor PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4xor - _null_ ));
DESCR("binary xor");
DATA(insert OID = 1901 ( int4not PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4not - _null_ ));
DESCR("binary not");
-DATA(insert OID = 1902 ( int4shl PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4shl - _null_ ));
+DATA(insert OID = 1902 ( int4shl PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4shl - _null_ ));
DESCR("binary shift left");
-DATA(insert OID = 1903 ( int4shr PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4shr - _null_ ));
+DATA(insert OID = 1903 ( int4shr PGNSP PGUID 12 f f t f i 2 23 "23 23" _null_ int4shr - _null_ ));
DESCR("binary shift right");
-DATA(insert OID = 1904 ( int8and PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8and - _null_ ));
+DATA(insert OID = 1904 ( int8and PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8and - _null_ ));
DESCR("binary and");
-DATA(insert OID = 1905 ( int8or PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8or - _null_ ));
+DATA(insert OID = 1905 ( int8or PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8or - _null_ ));
DESCR("binary or");
-DATA(insert OID = 1906 ( int8xor PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8xor - _null_ ));
+DATA(insert OID = 1906 ( int8xor PGNSP PGUID 12 f f t f i 2 20 "20 20" _null_ int8xor - _null_ ));
DESCR("binary xor");
DATA(insert OID = 1907 ( int8not PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8not - _null_ ));
DESCR("binary not");
-DATA(insert OID = 1908 ( int8shl PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int8shl - _null_ ));
+DATA(insert OID = 1908 ( int8shl PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int8shl - _null_ ));
DESCR("binary shift left");
-DATA(insert OID = 1909 ( int8shr PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int8shr - _null_ ));
+DATA(insert OID = 1909 ( int8shr PGNSP PGUID 12 f f t f i 2 20 "20 23" _null_ int8shr - _null_ ));
DESCR("binary shift right");
-DATA(insert OID = 1910 ( int8up PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8up - _null_ ));
+DATA(insert OID = 1910 ( int8up PGNSP PGUID 12 f f t f i 1 20 "20" _null_ int8up - _null_ ));
DESCR("unary plus");
-DATA(insert OID = 1911 ( int2up PGNSP PGUID 12 f f t f i 1 21 "21" _null_ int2up - _null_ ));
+DATA(insert OID = 1911 ( int2up PGNSP PGUID 12 f f t f i 1 21 "21" _null_ int2up - _null_ ));
DESCR("unary plus");
-DATA(insert OID = 1912 ( int4up PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4up - _null_ ));
+DATA(insert OID = 1912 ( int4up PGNSP PGUID 12 f f t f i 1 23 "23" _null_ int4up - _null_ ));
DESCR("unary plus");
DATA(insert OID = 1913 ( float4up PGNSP PGUID 12 f f t f i 1 700 "700" _null_ float4up - _null_ ));
DESCR("unary plus");
DATA(insert OID = 1915 ( numeric_uplus PGNSP PGUID 12 f f t f i 1 1700 "1700" _null_ numeric_uplus - _null_ ));
DESCR("unary plus");
-DATA(insert OID = 1922 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_table_privilege_name_name - _null_ ));
+DATA(insert OID = 1922 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_table_privilege_name_name - _null_ ));
DESCR("user privilege on relation by username, rel name");
-DATA(insert OID = 1923 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_table_privilege_name_id - _null_ ));
+DATA(insert OID = 1923 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_table_privilege_name_id - _null_ ));
DESCR("user privilege on relation by username, rel oid");
-DATA(insert OID = 1924 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_table_privilege_id_name - _null_ ));
+DATA(insert OID = 1924 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_table_privilege_id_name - _null_ ));
DESCR("user privilege on relation by usesysid, rel name");
-DATA(insert OID = 1925 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_table_privilege_id_id - _null_ ));
+DATA(insert OID = 1925 ( has_table_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_table_privilege_id_id - _null_ ));
DESCR("user privilege on relation by usesysid, rel oid");
-DATA(insert OID = 1926 ( has_table_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_table_privilege_name - _null_ ));
+DATA(insert OID = 1926 ( has_table_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_table_privilege_name - _null_ ));
DESCR("current user privilege on relation by rel name");
-DATA(insert OID = 1927 ( has_table_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_table_privilege_id - _null_ ));
+DATA(insert OID = 1927 ( has_table_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_table_privilege_id - _null_ ));
DESCR("current user privilege on relation by rel oid");
DESCR("Statistics: Number of blocks fetched");
DATA(insert OID = 1935 ( pg_stat_get_blocks_hit PGNSP PGUID 12 f f t f s 1 20 "26" _null_ pg_stat_get_blocks_hit - _null_ ));
DESCR("Statistics: Number of blocks found in cache");
-DATA(insert OID = 1936 ( pg_stat_get_backend_idset PGNSP PGUID 12 f f t t s 0 23 "" _null_ pg_stat_get_backend_idset - _null_ ));
+DATA(insert OID = 1936 ( pg_stat_get_backend_idset PGNSP PGUID 12 f f t t s 0 23 "" _null_ pg_stat_get_backend_idset - _null_ ));
DESCR("Statistics: Currently active backend IDs");
-DATA(insert OID = 2026 ( pg_backend_pid PGNSP PGUID 12 f f t f s 0 23 "" _null_ pg_backend_pid - _null_ ));
+DATA(insert OID = 2026 ( pg_backend_pid PGNSP PGUID 12 f f t f s 0 23 "" _null_ pg_backend_pid - _null_ ));
DESCR("Statistics: Current backend PID");
DATA(insert OID = 2274 ( pg_stat_reset PGNSP PGUID 12 f f f f v 0 16 "" _null_ pg_stat_reset - _null_ ));
DESCR("Statistics: Reset collected statistics");
DATA(insert OID = 1947 ( decode PGNSP PGUID 12 f f t f i 2 17 "25 25" _null_ binary_decode - _null_ ));
DESCR("Convert ascii-encoded text string into bytea value");
-DATA(insert OID = 1948 ( byteaeq PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteaeq - _null_ ));
+DATA(insert OID = 1948 ( byteaeq PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteaeq - _null_ ));
DESCR("equal");
-DATA(insert OID = 1949 ( bytealt PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ bytealt - _null_ ));
+DATA(insert OID = 1949 ( bytealt PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ bytealt - _null_ ));
DESCR("less-than");
-DATA(insert OID = 1950 ( byteale PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteale - _null_ ));
+DATA(insert OID = 1950 ( byteale PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteale - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 1951 ( byteagt PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteagt - _null_ ));
+DATA(insert OID = 1951 ( byteagt PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteagt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 1952 ( byteage PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteage - _null_ ));
+DATA(insert OID = 1952 ( byteage PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteage - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 1953 ( byteane PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteane - _null_ ));
+DATA(insert OID = 1953 ( byteane PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteane - _null_ ));
DESCR("not equal");
-DATA(insert OID = 1954 ( byteacmp PGNSP PGUID 12 f f t f i 2 23 "17 17" _null_ byteacmp - _null_ ));
+DATA(insert OID = 1954 ( byteacmp PGNSP PGUID 12 f f t f i 2 23 "17 17" _null_ byteacmp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 1961 ( timestamp PGNSP PGUID 12 f f t f i 2 1114 "1114 23" _null_ timestamp_scale - _null_ ));
+DATA(insert OID = 1961 ( timestamp PGNSP PGUID 12 f f t f i 2 1114 "1114 23" _null_ timestamp_scale - _null_ ));
DESCR("adjust timestamp precision");
-DATA(insert OID = 1965 ( oidlarger PGNSP PGUID 12 f f t f i 2 26 "26 26" _null_ oidlarger - _null_ ));
+DATA(insert OID = 1965 ( oidlarger PGNSP PGUID 12 f f t f i 2 26 "26 26" _null_ oidlarger - _null_ ));
DESCR("larger of two");
-DATA(insert OID = 1966 ( oidsmaller PGNSP PGUID 12 f f t f i 2 26 "26 26" _null_ oidsmaller - _null_ ));
+DATA(insert OID = 1966 ( oidsmaller PGNSP PGUID 12 f f t f i 2 26 "26 26" _null_ oidsmaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 1967 ( timestamptz PGNSP PGUID 12 f f t f i 2 1184 "1184 23" _null_ timestamptz_scale - _null_ ));
+DATA(insert OID = 1967 ( timestamptz PGNSP PGUID 12 f f t f i 2 1184 "1184 23" _null_ timestamptz_scale - _null_ ));
DESCR("adjust timestamptz precision");
-DATA(insert OID = 1968 ( time PGNSP PGUID 12 f f t f i 2 1083 "1083 23" _null_ time_scale - _null_ ));
+DATA(insert OID = 1968 ( time PGNSP PGUID 12 f f t f i 2 1083 "1083 23" _null_ time_scale - _null_ ));
DESCR("adjust time precision");
-DATA(insert OID = 1969 ( timetz PGNSP PGUID 12 f f t f i 2 1266 "1266 23" _null_ timetz_scale - _null_ ));
+DATA(insert OID = 1969 ( timetz PGNSP PGUID 12 f f t f i 2 1266 "1266 23" _null_ timetz_scale - _null_ ));
DESCR("adjust time with time zone precision");
DATA(insert OID = 2005 ( bytealike PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ bytealike - _null_ ));
DESCR("matches LIKE expression");
DATA(insert OID = 2006 ( byteanlike PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteanlike - _null_ ));
DESCR("does not match LIKE expression");
-DATA(insert OID = 2007 ( like PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ bytealike - _null_ ));
+DATA(insert OID = 2007 ( like PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ bytealike - _null_ ));
DESCR("matches LIKE expression");
-DATA(insert OID = 2008 ( notlike PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteanlike - _null_ ));
+DATA(insert OID = 2008 ( notlike PGNSP PGUID 12 f f t f i 2 16 "17 17" _null_ byteanlike - _null_ ));
DESCR("does not match LIKE expression");
DATA(insert OID = 2009 ( like_escape PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ like_escape_bytea - _null_ ));
DESCR("convert LIKE pattern to use backslash escapes");
DATA(insert OID = 2010 ( length PGNSP PGUID 12 f f t f i 1 23 "17" _null_ byteaoctetlen - _null_ ));
DESCR("octet length");
-DATA(insert OID = 2011 ( byteacat PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ byteacat - _null_ ));
+DATA(insert OID = 2011 ( byteacat PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ byteacat - _null_ ));
DESCR("concatenate");
-DATA(insert OID = 2012 ( substring PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ bytea_substr - _null_ ));
+DATA(insert OID = 2012 ( substring PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ bytea_substr - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 2013 ( substring PGNSP PGUID 12 f f t f i 2 17 "17 23" _null_ bytea_substr_no_len - _null_ ));
+DATA(insert OID = 2013 ( substring PGNSP PGUID 12 f f t f i 2 17 "17 23" _null_ bytea_substr_no_len - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 2085 ( substr PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ bytea_substr - _null_ ));
+DATA(insert OID = 2085 ( substr PGNSP PGUID 12 f f t f i 3 17 "17 23 23" _null_ bytea_substr - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 2086 ( substr PGNSP PGUID 12 f f t f i 2 17 "17 23" _null_ bytea_substr_no_len - _null_ ));
+DATA(insert OID = 2086 ( substr PGNSP PGUID 12 f f t f i 2 17 "17 23" _null_ bytea_substr_no_len - _null_ ));
DESCR("return portion of string");
-DATA(insert OID = 2014 ( position PGNSP PGUID 12 f f t f i 2 23 "17 17" _null_ byteapos - _null_ ));
+DATA(insert OID = 2014 ( position PGNSP PGUID 12 f f t f i 2 23 "17 17" _null_ byteapos - _null_ ));
DESCR("return position of substring");
-DATA(insert OID = 2015 ( btrim PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ byteatrim - _null_ ));
+DATA(insert OID = 2015 ( btrim PGNSP PGUID 12 f f t f i 2 17 "17 17" _null_ byteatrim - _null_ ));
DESCR("trim both ends of string");
DATA(insert OID = 2019 ( time PGNSP PGUID 12 f f t f s 1 1083 "1184" _null_ timestamptz_time - _null_ ));
DESCR("truncate timestamp to specified units");
DATA(insert OID = 2021 ( date_part PGNSP PGUID 12 f f t f i 2 701 "25 1114" _null_ timestamp_part - _null_ ));
DESCR("extract field from timestamp");
-DATA(insert OID = 2022 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "25" _null_ text_timestamp - _null_ ));
+DATA(insert OID = 2022 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "25" _null_ text_timestamp - _null_ ));
DESCR("convert text to timestamp");
DATA(insert OID = 2023 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "702" _null_ abstime_timestamp - _null_ ));
DESCR("convert abstime to timestamp");
DATA(insert OID = 2024 ( timestamp PGNSP PGUID 12 f f t f i 1 1114 "1082" _null_ date_timestamp - _null_ ));
DESCR("convert date to timestamp");
-DATA(insert OID = 2025 ( timestamp PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ datetime_timestamp - _null_ ));
+DATA(insert OID = 2025 ( timestamp PGNSP PGUID 12 f f t f i 2 1114 "1082 1083" _null_ datetime_timestamp - _null_ ));
DESCR("convert date and time to timestamp");
DATA(insert OID = 2027 ( timestamp PGNSP PGUID 12 f f t f s 1 1114 "1184" _null_ timestamptz_timestamp - _null_ ));
DESCR("convert timestamp with time zone to timestamp");
DESCR("convert timestamp to date");
DATA(insert OID = 2030 ( abstime PGNSP PGUID 12 f f t f s 1 702 "1114" _null_ timestamp_abstime - _null_ ));
DESCR("convert timestamp to abstime");
-DATA(insert OID = 2031 ( timestamp_mi PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ timestamp_mi - _null_ ));
+DATA(insert OID = 2031 ( timestamp_mi PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ timestamp_mi - _null_ ));
DESCR("subtract");
-DATA(insert OID = 2032 ( timestamp_pl_interval PGNSP PGUID 12 f f t f i 2 1114 "1114 1186" _null_ timestamp_pl_interval - _null_ ));
+DATA(insert OID = 2032 ( timestamp_pl_interval PGNSP PGUID 12 f f t f i 2 1114 "1114 1186" _null_ timestamp_pl_interval - _null_ ));
DESCR("plus");
-DATA(insert OID = 2033 ( timestamp_mi_interval PGNSP PGUID 12 f f t f i 2 1114 "1114 1186" _null_ timestamp_mi_interval - _null_ ));
+DATA(insert OID = 2033 ( timestamp_mi_interval PGNSP PGUID 12 f f t f i 2 1114 "1114 1186" _null_ timestamp_mi_interval - _null_ ));
DESCR("minus");
DATA(insert OID = 2034 ( text PGNSP PGUID 12 f f t f s 1 25 "1114" _null_ timestamp_text - _null_ ));
DESCR("convert timestamp to text");
-DATA(insert OID = 2035 ( timestamp_smaller PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ timestamp_smaller - _null_ ));
+DATA(insert OID = 2035 ( timestamp_smaller PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ timestamp_smaller - _null_ ));
DESCR("smaller of two");
-DATA(insert OID = 2036 ( timestamp_larger PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ timestamp_larger - _null_ ));
+DATA(insert OID = 2036 ( timestamp_larger PGNSP PGUID 12 f f t f i 2 1114 "1114 1114" _null_ timestamp_larger - _null_ ));
DESCR("larger of two");
DATA(insert OID = 2037 ( timezone PGNSP PGUID 12 f f t f i 2 1266 "25 1266" _null_ timetz_zone - _null_ ));
DESCR("adjust time with time zone to new zone");
-DATA(insert OID = 2038 ( timezone PGNSP PGUID 12 f f t f i 2 1266 "1186 1266" _null_ timetz_izone - _null_ ));
+DATA(insert OID = 2038 ( timezone PGNSP PGUID 12 f f t f i 2 1266 "1186 1266" _null_ timetz_izone - _null_ ));
DESCR("adjust time with time zone to new zone");
-DATA(insert OID = 2041 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1114 1114 1114 1114" _null_ overlaps_timestamp - _null_ ));
+DATA(insert OID = 2041 ( overlaps PGNSP PGUID 12 f f f f i 4 16 "1114 1114 1114 1114" _null_ overlaps_timestamp - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1186 1114 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 2042 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1186 1114 1186" _null_ "select ($1, ($1 + $2)) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1114 1114 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
+DATA(insert OID = 2043 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1114 1114 1186" _null_ "select ($1, $2) overlaps ($3, ($3 + $4))" - _null_ ));
DESCR("SQL92 interval comparison");
-DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1186 1114 1114" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
+DATA(insert OID = 2044 ( overlaps PGNSP PGUID 14 f f f f i 4 16 "1114 1186 1114 1114" _null_ "select ($1, ($1 + $2)) overlaps ($3, $4)" - _null_ ));
DESCR("SQL92 interval comparison");
DATA(insert OID = 2045 ( timestamp_cmp PGNSP PGUID 12 f f t f i 2 23 "1114 1114" _null_ timestamp_cmp - _null_ ));
DESCR("less-equal-greater");
DESCR("greater-than-or-equal");
DATA(insert OID = 2057 ( timestamp_gt PGNSP PGUID 12 f f t f i 2 16 "1114 1114" _null_ timestamp_gt - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 2058 ( age PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ timestamp_age - _null_ ));
+DATA(insert OID = 2058 ( age PGNSP PGUID 12 f f t f i 2 1186 "1114 1114" _null_ timestamp_age - _null_ ));
DESCR("date difference preserving months and years");
DATA(insert OID = 2059 ( age PGNSP PGUID 14 f f t f s 1 1186 "1114" _null_ "select pg_catalog.age(cast(current_date as timestamp without time zone), $1)" - _null_ ));
DESCR("date difference from today preserving months and years");
DATA(insert OID = 2069 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "25 1114" _null_ timestamp_zone - _null_ ));
DESCR("adjust timestamp to new time zone");
-DATA(insert OID = 2070 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "1186 1114" _null_ timestamp_izone - _null_ ));
+DATA(insert OID = 2070 ( timezone PGNSP PGUID 12 f f t f i 2 1184 "1186 1114" _null_ timestamp_izone - _null_ ));
DESCR("adjust timestamp to new time zone");
-DATA(insert OID = 2071 ( date_pl_interval PGNSP PGUID 12 f f t f i 2 1114 "1082 1186" _null_ date_pl_interval - _null_ ));
+DATA(insert OID = 2071 ( date_pl_interval PGNSP PGUID 12 f f t f i 2 1114 "1082 1186" _null_ date_pl_interval - _null_ ));
DESCR("add");
-DATA(insert OID = 2072 ( date_mi_interval PGNSP PGUID 12 f f t f i 2 1114 "1082 1186" _null_ date_mi_interval - _null_ ));
+DATA(insert OID = 2072 ( date_mi_interval PGNSP PGUID 12 f f t f i 2 1114 "1082 1186" _null_ date_mi_interval - _null_ ));
DESCR("subtract");
DATA(insert OID = 2073 ( substring PGNSP PGUID 12 f f t f i 2 25 "25 25" _null_ textregexsubstr - _null_ ));
DESCR("extracts text matching regular expression");
-DATA(insert OID = 2074 ( substring PGNSP PGUID 14 f f t f i 3 25 "25 25 25" _null_ "select pg_catalog.substring($1, pg_catalog.similar_escape($2, $3))" - _null_ ));
+DATA(insert OID = 2074 ( substring PGNSP PGUID 14 f f t f i 3 25 "25 25 25" _null_ "select pg_catalog.substring($1, pg_catalog.similar_escape($2, $3))" - _null_ ));
DESCR("extracts text matching SQL99 regular expression");
DATA(insert OID = 2075 ( bit PGNSP PGUID 12 f f t f i 2 1560 "20 23" _null_ bitfromint8 - _null_ ));
DESCR("int8 to bitstring");
-DATA(insert OID = 2076 ( int8 PGNSP PGUID 12 f f t f i 1 20 "1560" _null_ bittoint8 - _null_ ));
+DATA(insert OID = 2076 ( int8 PGNSP PGUID 12 f f t f i 1 20 "1560" _null_ bittoint8 - _null_ ));
DESCR("bitstring to int8");
DATA(insert OID = 2077 ( current_setting PGNSP PGUID 12 f f t f s 1 25 "25" _null_ show_config_by_name - _null_ ));
DATA(insert OID = 2186 ( name_pattern_ne PGNSP PGUID 12 f f t f i 2 16 "19 19" _null_ name_pattern_ne - _null_ ));
DATA(insert OID = 2187 ( btname_pattern_cmp PGNSP PGUID 12 f f t f i 2 23 "19 19" _null_ btname_pattern_cmp - _null_ ));
-DATA(insert OID = 2188 ( btint48cmp PGNSP PGUID 12 f f t f i 2 23 "23 20" _null_ btint48cmp - _null_ ));
-DATA(insert OID = 2189 ( btint84cmp PGNSP PGUID 12 f f t f i 2 23 "20 23" _null_ btint84cmp - _null_ ));
-DATA(insert OID = 2190 ( btint24cmp PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ btint24cmp - _null_ ));
-DATA(insert OID = 2191 ( btint42cmp PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ btint42cmp - _null_ ));
-DATA(insert OID = 2192 ( btint28cmp PGNSP PGUID 12 f f t f i 2 23 "21 20" _null_ btint28cmp - _null_ ));
-DATA(insert OID = 2193 ( btint82cmp PGNSP PGUID 12 f f t f i 2 23 "20 21" _null_ btint82cmp - _null_ ));
-DATA(insert OID = 2194 ( btfloat48cmp PGNSP PGUID 12 f f t f i 2 23 "700 701" _null_ btfloat48cmp - _null_ ));
-DATA(insert OID = 2195 ( btfloat84cmp PGNSP PGUID 12 f f t f i 2 23 "701 700" _null_ btfloat84cmp - _null_ ));
+DATA(insert OID = 2188 ( btint48cmp PGNSP PGUID 12 f f t f i 2 23 "23 20" _null_ btint48cmp - _null_ ));
+DATA(insert OID = 2189 ( btint84cmp PGNSP PGUID 12 f f t f i 2 23 "20 23" _null_ btint84cmp - _null_ ));
+DATA(insert OID = 2190 ( btint24cmp PGNSP PGUID 12 f f t f i 2 23 "21 23" _null_ btint24cmp - _null_ ));
+DATA(insert OID = 2191 ( btint42cmp PGNSP PGUID 12 f f t f i 2 23 "23 21" _null_ btint42cmp - _null_ ));
+DATA(insert OID = 2192 ( btint28cmp PGNSP PGUID 12 f f t f i 2 23 "21 20" _null_ btint28cmp - _null_ ));
+DATA(insert OID = 2193 ( btint82cmp PGNSP PGUID 12 f f t f i 2 23 "20 21" _null_ btint82cmp - _null_ ));
+DATA(insert OID = 2194 ( btfloat48cmp PGNSP PGUID 12 f f t f i 2 23 "700 701" _null_ btfloat48cmp - _null_ ));
+DATA(insert OID = 2195 ( btfloat84cmp PGNSP PGUID 12 f f t f i 2 23 "701 700" _null_ btfloat84cmp - _null_ ));
DATA(insert OID = 2212 ( regprocedurein PGNSP PGUID 12 f f t f s 1 2202 "2275" _null_ regprocedurein - _null_ ));
DATA(insert OID = 2248 ( fmgr_sql_validator PGNSP PGUID 12 f f t f s 1 2278 "26" _null_ fmgr_sql_validator - _null_ ));
DESCR("(internal)");
-DATA(insert OID = 2250 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_database_privilege_name_name - _null_ ));
+DATA(insert OID = 2250 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_database_privilege_name_name - _null_ ));
DESCR("user privilege on database by username, database name");
-DATA(insert OID = 2251 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_database_privilege_name_id - _null_ ));
+DATA(insert OID = 2251 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_database_privilege_name_id - _null_ ));
DESCR("user privilege on database by username, database oid");
-DATA(insert OID = 2252 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_database_privilege_id_name - _null_ ));
+DATA(insert OID = 2252 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_database_privilege_id_name - _null_ ));
DESCR("user privilege on database by usesysid, database name");
-DATA(insert OID = 2253 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_database_privilege_id_id - _null_ ));
+DATA(insert OID = 2253 ( has_database_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_database_privilege_id_id - _null_ ));
DESCR("user privilege on database by usesysid, database oid");
-DATA(insert OID = 2254 ( has_database_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_database_privilege_name - _null_ ));
+DATA(insert OID = 2254 ( has_database_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_database_privilege_name - _null_ ));
DESCR("current user privilege on database by database name");
-DATA(insert OID = 2255 ( has_database_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_database_privilege_id - _null_ ));
+DATA(insert OID = 2255 ( has_database_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_database_privilege_id - _null_ ));
DESCR("current user privilege on database by database oid");
-DATA(insert OID = 2256 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_function_privilege_name_name - _null_ ));
+DATA(insert OID = 2256 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_function_privilege_name_name - _null_ ));
DESCR("user privilege on function by username, function name");
-DATA(insert OID = 2257 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_function_privilege_name_id - _null_ ));
+DATA(insert OID = 2257 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_function_privilege_name_id - _null_ ));
DESCR("user privilege on function by username, function oid");
-DATA(insert OID = 2258 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_function_privilege_id_name - _null_ ));
+DATA(insert OID = 2258 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_function_privilege_id_name - _null_ ));
DESCR("user privilege on function by usesysid, function name");
-DATA(insert OID = 2259 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_function_privilege_id_id - _null_ ));
+DATA(insert OID = 2259 ( has_function_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_function_privilege_id_id - _null_ ));
DESCR("user privilege on function by usesysid, function oid");
-DATA(insert OID = 2260 ( has_function_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_function_privilege_name - _null_ ));
+DATA(insert OID = 2260 ( has_function_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_function_privilege_name - _null_ ));
DESCR("current user privilege on function by function name");
-DATA(insert OID = 2261 ( has_function_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_function_privilege_id - _null_ ));
+DATA(insert OID = 2261 ( has_function_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_function_privilege_id - _null_ ));
DESCR("current user privilege on function by function oid");
-DATA(insert OID = 2262 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_language_privilege_name_name - _null_ ));
+DATA(insert OID = 2262 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_language_privilege_name_name - _null_ ));
DESCR("user privilege on language by username, language name");
-DATA(insert OID = 2263 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_language_privilege_name_id - _null_ ));
+DATA(insert OID = 2263 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_language_privilege_name_id - _null_ ));
DESCR("user privilege on language by username, language oid");
-DATA(insert OID = 2264 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_language_privilege_id_name - _null_ ));
+DATA(insert OID = 2264 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_language_privilege_id_name - _null_ ));
DESCR("user privilege on language by usesysid, language name");
-DATA(insert OID = 2265 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_language_privilege_id_id - _null_ ));
+DATA(insert OID = 2265 ( has_language_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_language_privilege_id_id - _null_ ));
DESCR("user privilege on language by usesysid, language oid");
-DATA(insert OID = 2266 ( has_language_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_language_privilege_name - _null_ ));
+DATA(insert OID = 2266 ( has_language_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_language_privilege_name - _null_ ));
DESCR("current user privilege on language by language name");
-DATA(insert OID = 2267 ( has_language_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_language_privilege_id - _null_ ));
+DATA(insert OID = 2267 ( has_language_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_language_privilege_id - _null_ ));
DESCR("current user privilege on language by language oid");
-DATA(insert OID = 2268 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_schema_privilege_name_name - _null_ ));
+DATA(insert OID = 2268 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_schema_privilege_name_name - _null_ ));
DESCR("user privilege on schema by username, schema name");
-DATA(insert OID = 2269 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_schema_privilege_name_id - _null_ ));
+DATA(insert OID = 2269 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_schema_privilege_name_id - _null_ ));
DESCR("user privilege on schema by username, schema oid");
-DATA(insert OID = 2270 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_schema_privilege_id_name - _null_ ));
+DATA(insert OID = 2270 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_schema_privilege_id_name - _null_ ));
DESCR("user privilege on schema by usesysid, schema name");
-DATA(insert OID = 2271 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_schema_privilege_id_id - _null_ ));
+DATA(insert OID = 2271 ( has_schema_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_schema_privilege_id_id - _null_ ));
DESCR("user privilege on schema by usesysid, schema oid");
-DATA(insert OID = 2272 ( has_schema_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_schema_privilege_name - _null_ ));
+DATA(insert OID = 2272 ( has_schema_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_schema_privilege_name - _null_ ));
DESCR("current user privilege on schema by schema name");
-DATA(insert OID = 2273 ( has_schema_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_schema_privilege_id - _null_ ));
+DATA(insert OID = 2273 ( has_schema_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_schema_privilege_id - _null_ ));
DESCR("current user privilege on schema by schema oid");
-DATA(insert OID = 2390 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_tablespace_privilege_name_name - _null_ ));
+DATA(insert OID = 2390 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "19 25 25" _null_ has_tablespace_privilege_name_name - _null_ ));
DESCR("user privilege on tablespace by username, tablespace name");
-DATA(insert OID = 2391 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_tablespace_privilege_name_id - _null_ ));
+DATA(insert OID = 2391 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "19 26 25" _null_ has_tablespace_privilege_name_id - _null_ ));
DESCR("user privilege on tablespace by username, tablespace oid");
-DATA(insert OID = 2392 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_tablespace_privilege_id_name - _null_ ));
+DATA(insert OID = 2392 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "23 25 25" _null_ has_tablespace_privilege_id_name - _null_ ));
DESCR("user privilege on tablespace by usesysid, tablespace name");
-DATA(insert OID = 2393 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_tablespace_privilege_id_id - _null_ ));
+DATA(insert OID = 2393 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 3 16 "23 26 25" _null_ has_tablespace_privilege_id_id - _null_ ));
DESCR("user privilege on tablespace by usesysid, tablespace oid");
-DATA(insert OID = 2394 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_tablespace_privilege_name - _null_ ));
+DATA(insert OID = 2394 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 2 16 "25 25" _null_ has_tablespace_privilege_name - _null_ ));
DESCR("current user privilege on tablespace by tablespace name");
-DATA(insert OID = 2395 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_tablespace_privilege_id - _null_ ));
+DATA(insert OID = 2395 ( has_tablespace_privilege PGNSP PGUID 12 f f t f s 2 16 "26 25" _null_ has_tablespace_privilege_id - _null_ ));
DESCR("current user privilege on tablespace by tablespace oid");
DATA(insert OID = 2290 ( record_in PGNSP PGUID 12 f f t f v 2 2249 "2275 26" _null_ record_in - _null_ ));
DESCR("calculates md5 hash");
/* crosstype operations for date vs. timestamp and timestamptz */
-DATA(insert OID = 2338 ( date_lt_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_lt_timestamp - _null_ ));
+DATA(insert OID = 2338 ( date_lt_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_lt_timestamp - _null_ ));
DESCR("less-than");
-DATA(insert OID = 2339 ( date_le_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_le_timestamp - _null_ ));
+DATA(insert OID = 2339 ( date_le_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_le_timestamp - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 2340 ( date_eq_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_eq_timestamp - _null_ ));
+DATA(insert OID = 2340 ( date_eq_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_eq_timestamp - _null_ ));
DESCR("equal");
-DATA(insert OID = 2341 ( date_gt_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_gt_timestamp - _null_ ));
+DATA(insert OID = 2341 ( date_gt_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_gt_timestamp - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 2342 ( date_ge_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_ge_timestamp - _null_ ));
+DATA(insert OID = 2342 ( date_ge_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_ge_timestamp - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 2343 ( date_ne_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_ne_timestamp - _null_ ));
+DATA(insert OID = 2343 ( date_ne_timestamp PGNSP PGUID 12 f f t f i 2 16 "1082 1114" _null_ date_ne_timestamp - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2344 ( date_cmp_timestamp PGNSP PGUID 12 f f t f i 2 23 "1082 1114" _null_ date_cmp_timestamp - _null_ ));
+DATA(insert OID = 2344 ( date_cmp_timestamp PGNSP PGUID 12 f f t f i 2 23 "1082 1114" _null_ date_cmp_timestamp - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 2351 ( date_lt_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_lt_timestamptz - _null_ ));
+DATA(insert OID = 2351 ( date_lt_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_lt_timestamptz - _null_ ));
DESCR("less-than");
-DATA(insert OID = 2352 ( date_le_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_le_timestamptz - _null_ ));
+DATA(insert OID = 2352 ( date_le_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_le_timestamptz - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 2353 ( date_eq_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_eq_timestamptz - _null_ ));
+DATA(insert OID = 2353 ( date_eq_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_eq_timestamptz - _null_ ));
DESCR("equal");
-DATA(insert OID = 2354 ( date_gt_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_gt_timestamptz - _null_ ));
+DATA(insert OID = 2354 ( date_gt_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_gt_timestamptz - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 2355 ( date_ge_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_ge_timestamptz - _null_ ));
+DATA(insert OID = 2355 ( date_ge_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_ge_timestamptz - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 2356 ( date_ne_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_ne_timestamptz - _null_ ));
+DATA(insert OID = 2356 ( date_ne_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1082 1184" _null_ date_ne_timestamptz - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2357 ( date_cmp_timestamptz PGNSP PGUID 12 f f t f s 2 23 "1082 1184" _null_ date_cmp_timestamptz - _null_ ));
+DATA(insert OID = 2357 ( date_cmp_timestamptz PGNSP PGUID 12 f f t f s 2 23 "1082 1184" _null_ date_cmp_timestamptz - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 2364 ( timestamp_lt_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_lt_date - _null_ ));
+DATA(insert OID = 2364 ( timestamp_lt_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_lt_date - _null_ ));
DESCR("less-than");
-DATA(insert OID = 2365 ( timestamp_le_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_le_date - _null_ ));
+DATA(insert OID = 2365 ( timestamp_le_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_le_date - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 2366 ( timestamp_eq_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_eq_date - _null_ ));
+DATA(insert OID = 2366 ( timestamp_eq_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_eq_date - _null_ ));
DESCR("equal");
-DATA(insert OID = 2367 ( timestamp_gt_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_gt_date - _null_ ));
+DATA(insert OID = 2367 ( timestamp_gt_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_gt_date - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 2368 ( timestamp_ge_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_ge_date - _null_ ));
+DATA(insert OID = 2368 ( timestamp_ge_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_ge_date - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 2369 ( timestamp_ne_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_ne_date - _null_ ));
+DATA(insert OID = 2369 ( timestamp_ne_date PGNSP PGUID 12 f f t f i 2 16 "1114 1082" _null_ timestamp_ne_date - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2370 ( timestamp_cmp_date PGNSP PGUID 12 f f t f i 2 23 "1114 1082" _null_ timestamp_cmp_date - _null_ ));
+DATA(insert OID = 2370 ( timestamp_cmp_date PGNSP PGUID 12 f f t f i 2 23 "1114 1082" _null_ timestamp_cmp_date - _null_ ));
DESCR("less-equal-greater");
-DATA(insert OID = 2377 ( timestamptz_lt_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_lt_date - _null_ ));
+DATA(insert OID = 2377 ( timestamptz_lt_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_lt_date - _null_ ));
DESCR("less-than");
-DATA(insert OID = 2378 ( timestamptz_le_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_le_date - _null_ ));
+DATA(insert OID = 2378 ( timestamptz_le_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_le_date - _null_ ));
DESCR("less-than-or-equal");
-DATA(insert OID = 2379 ( timestamptz_eq_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_eq_date - _null_ ));
+DATA(insert OID = 2379 ( timestamptz_eq_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_eq_date - _null_ ));
DESCR("equal");
-DATA(insert OID = 2380 ( timestamptz_gt_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_gt_date - _null_ ));
+DATA(insert OID = 2380 ( timestamptz_gt_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_gt_date - _null_ ));
DESCR("greater-than");
-DATA(insert OID = 2381 ( timestamptz_ge_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_ge_date - _null_ ));
+DATA(insert OID = 2381 ( timestamptz_ge_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_ge_date - _null_ ));
DESCR("greater-than-or-equal");
-DATA(insert OID = 2382 ( timestamptz_ne_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_ne_date - _null_ ));
+DATA(insert OID = 2382 ( timestamptz_ne_date PGNSP PGUID 12 f f t f s 2 16 "1184 1082" _null_ timestamptz_ne_date - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2383 ( timestamptz_cmp_date PGNSP PGUID 12 f f t f s 2 23 "1184 1082" _null_ timestamptz_cmp_date - _null_ ));
+DATA(insert OID = 2383 ( timestamptz_cmp_date PGNSP PGUID 12 f f t f s 2 23 "1184 1082" _null_ timestamptz_cmp_date - _null_ ));
DESCR("less-equal-greater");
/* crosstype operations for timestamp vs. timestamptz */
DESCR("greater-than-or-equal");
DATA(insert OID = 2525 ( timestamp_ne_timestamptz PGNSP PGUID 12 f f t f s 2 16 "1114 1184" _null_ timestamp_ne_timestamptz - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2526 ( timestamp_cmp_timestamptz PGNSP PGUID 12 f f t f s 2 23 "1114 1184" _null_ timestamp_cmp_timestamptz - _null_ ));
+DATA(insert OID = 2526 ( timestamp_cmp_timestamptz PGNSP PGUID 12 f f t f s 2 23 "1114 1184" _null_ timestamp_cmp_timestamptz - _null_ ));
DESCR("less-equal-greater");
DATA(insert OID = 2527 ( timestamptz_lt_timestamp PGNSP PGUID 12 f f t f s 2 16 "1184 1114" _null_ timestamptz_lt_timestamp - _null_ ));
DESCR("greater-than-or-equal");
DATA(insert OID = 2532 ( timestamptz_ne_timestamp PGNSP PGUID 12 f f t f s 2 16 "1184 1114" _null_ timestamptz_ne_timestamp - _null_ ));
DESCR("not equal");
-DATA(insert OID = 2533 ( timestamptz_cmp_timestamp PGNSP PGUID 12 f f t f s 2 23 "1184 1114" _null_ timestamptz_cmp_timestamp - _null_ ));
+DATA(insert OID = 2533 ( timestamptz_cmp_timestamp PGNSP PGUID 12 f f t f s 2 23 "1184 1114" _null_ timestamptz_cmp_timestamp - _null_ ));
DESCR("less-equal-greater");
DESCR("I/O");
DATA(insert OID = 2403 ( record_send PGNSP PGUID 12 f f t f v 2 17 "2249 26" _null_ record_send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2404 ( int2recv PGNSP PGUID 12 f f t f i 1 21 "2281" _null_ int2recv - _null_ ));
+DATA(insert OID = 2404 ( int2recv PGNSP PGUID 12 f f t f i 1 21 "2281" _null_ int2recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2405 ( int2send PGNSP PGUID 12 f f t f i 1 17 "21" _null_ int2send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2406 ( int4recv PGNSP PGUID 12 f f t f i 1 23 "2281" _null_ int4recv - _null_ ));
+DATA(insert OID = 2406 ( int4recv PGNSP PGUID 12 f f t f i 1 23 "2281" _null_ int4recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2407 ( int4send PGNSP PGUID 12 f f t f i 1 17 "23" _null_ int4send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2408 ( int8recv PGNSP PGUID 12 f f t f i 1 20 "2281" _null_ int8recv - _null_ ));
+DATA(insert OID = 2408 ( int8recv PGNSP PGUID 12 f f t f i 1 20 "2281" _null_ int8recv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2409 ( int8send PGNSP PGUID 12 f f t f i 1 17 "20" _null_ int8send - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2410 ( int2vectorrecv PGNSP PGUID 12 f f t f i 1 22 "2281" _null_ int2vectorrecv - _null_ ));
+DATA(insert OID = 2410 ( int2vectorrecv PGNSP PGUID 12 f f t f i 1 22 "2281" _null_ int2vectorrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2411 ( int2vectorsend PGNSP PGUID 12 f f t f i 1 17 "22" _null_ int2vectorsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2412 ( bytearecv PGNSP PGUID 12 f f t f i 1 17 "2281" _null_ bytearecv - _null_ ));
+DATA(insert OID = 2412 ( bytearecv PGNSP PGUID 12 f f t f i 1 17 "2281" _null_ bytearecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2413 ( byteasend PGNSP PGUID 12 f f t f i 1 17 "17" _null_ byteasend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2414 ( textrecv PGNSP PGUID 12 f f t f s 1 25 "2281" _null_ textrecv - _null_ ));
+DATA(insert OID = 2414 ( textrecv PGNSP PGUID 12 f f t f s 1 25 "2281" _null_ textrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2415 ( textsend PGNSP PGUID 12 f f t f s 1 17 "25" _null_ textsend - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2417 ( unknownsend PGNSP PGUID 12 f f t f i 1 17 "705" _null_ unknownsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2418 ( oidrecv PGNSP PGUID 12 f f t f i 1 26 "2281" _null_ oidrecv - _null_ ));
+DATA(insert OID = 2418 ( oidrecv PGNSP PGUID 12 f f t f i 1 26 "2281" _null_ oidrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2419 ( oidsend PGNSP PGUID 12 f f t f i 1 17 "26" _null_ oidsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2420 ( oidvectorrecv PGNSP PGUID 12 f f t f i 1 30 "2281" _null_ oidvectorrecv - _null_ ));
+DATA(insert OID = 2420 ( oidvectorrecv PGNSP PGUID 12 f f t f i 1 30 "2281" _null_ oidvectorrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2421 ( oidvectorsend PGNSP PGUID 12 f f t f i 1 17 "30" _null_ oidvectorsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2422 ( namerecv PGNSP PGUID 12 f f t f s 1 19 "2281" _null_ namerecv - _null_ ));
+DATA(insert OID = 2422 ( namerecv PGNSP PGUID 12 f f t f s 1 19 "2281" _null_ namerecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2423 ( namesend PGNSP PGUID 12 f f t f s 1 17 "19" _null_ namesend - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2430 ( bpcharrecv PGNSP PGUID 12 f f t f s 1 1042 "2281" _null_ bpcharrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2431 ( bpcharsend PGNSP PGUID 12 f f t f s 1 17 "1042" _null_ bpcharsend - _null_ ));
+DATA(insert OID = 2431 ( bpcharsend PGNSP PGUID 12 f f t f s 1 17 "1042" _null_ bpcharsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2432 ( varcharrecv PGNSP PGUID 12 f f t f s 1 1043 "2281" _null_ varcharrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2433 ( varcharsend PGNSP PGUID 12 f f t f s 1 17 "1043" _null_ varcharsend - _null_ ));
+DATA(insert OID = 2433 ( varcharsend PGNSP PGUID 12 f f t f s 1 17 "1043" _null_ varcharsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2434 ( charrecv PGNSP PGUID 12 f f t f i 1 18 "2281" _null_ charrecv - _null_ ));
+DATA(insert OID = 2434 ( charrecv PGNSP PGUID 12 f f t f i 1 18 "2281" _null_ charrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2435 ( charsend PGNSP PGUID 12 f f t f i 1 17 "18" _null_ charsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2436 ( boolrecv PGNSP PGUID 12 f f t f i 1 16 "2281" _null_ boolrecv - _null_ ));
+DATA(insert OID = 2436 ( boolrecv PGNSP PGUID 12 f f t f i 1 16 "2281" _null_ boolrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2437 ( boolsend PGNSP PGUID 12 f f t f i 1 17 "16" _null_ boolsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2438 ( tidrecv PGNSP PGUID 12 f f t f i 1 27 "2281" _null_ tidrecv - _null_ ));
+DATA(insert OID = 2438 ( tidrecv PGNSP PGUID 12 f f t f i 1 27 "2281" _null_ tidrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2439 ( tidsend PGNSP PGUID 12 f f t f i 1 17 "27" _null_ tidsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2440 ( xidrecv PGNSP PGUID 12 f f t f i 1 28 "2281" _null_ xidrecv - _null_ ));
+DATA(insert OID = 2440 ( xidrecv PGNSP PGUID 12 f f t f i 1 28 "2281" _null_ xidrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2441 ( xidsend PGNSP PGUID 12 f f t f i 1 17 "28" _null_ xidsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2442 ( cidrecv PGNSP PGUID 12 f f t f i 1 29 "2281" _null_ cidrecv - _null_ ));
+DATA(insert OID = 2442 ( cidrecv PGNSP PGUID 12 f f t f i 1 29 "2281" _null_ cidrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2443 ( cidsend PGNSP PGUID 12 f f t f i 1 17 "29" _null_ cidsend - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2444 ( regprocrecv PGNSP PGUID 12 f f t f i 1 24 "2281" _null_ regprocrecv - _null_ ));
+DATA(insert OID = 2444 ( regprocrecv PGNSP PGUID 12 f f t f i 1 24 "2281" _null_ regprocrecv - _null_ ));
DESCR("I/O");
DATA(insert OID = 2445 ( regprocsend PGNSP PGUID 12 f f t f i 1 17 "24" _null_ regprocsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2446 ( regprocedurerecv PGNSP PGUID 12 f f t f i 1 2202 "2281" _null_ regprocedurerecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2447 ( regproceduresend PGNSP PGUID 12 f f t f i 1 17 "2202" _null_ regproceduresend - _null_ ));
+DATA(insert OID = 2447 ( regproceduresend PGNSP PGUID 12 f f t f i 1 17 "2202" _null_ regproceduresend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2448 ( regoperrecv PGNSP PGUID 12 f f t f i 1 2203 "2281" _null_ regoperrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2449 ( regopersend PGNSP PGUID 12 f f t f i 1 17 "2203" _null_ regopersend - _null_ ));
+DATA(insert OID = 2449 ( regopersend PGNSP PGUID 12 f f t f i 1 17 "2203" _null_ regopersend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2450 ( regoperatorrecv PGNSP PGUID 12 f f t f i 1 2204 "2281" _null_ regoperatorrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2451 ( regoperatorsend PGNSP PGUID 12 f f t f i 1 17 "2204" _null_ regoperatorsend - _null_ ));
+DATA(insert OID = 2451 ( regoperatorsend PGNSP PGUID 12 f f t f i 1 17 "2204" _null_ regoperatorsend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2452 ( regclassrecv PGNSP PGUID 12 f f t f i 1 2205 "2281" _null_ regclassrecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2453 ( regclasssend PGNSP PGUID 12 f f t f i 1 17 "2205" _null_ regclasssend - _null_ ));
+DATA(insert OID = 2453 ( regclasssend PGNSP PGUID 12 f f t f i 1 17 "2205" _null_ regclasssend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2454 ( regtyperecv PGNSP PGUID 12 f f t f i 1 2206 "2281" _null_ regtyperecv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2455 ( regtypesend PGNSP PGUID 12 f f t f i 1 17 "2206" _null_ regtypesend - _null_ ));
+DATA(insert OID = 2455 ( regtypesend PGNSP PGUID 12 f f t f i 1 17 "2206" _null_ regtypesend - _null_ ));
DESCR("I/O");
DATA(insert OID = 2456 ( bit_recv PGNSP PGUID 12 f f t f i 1 1560 "2281" _null_ bit_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2457 ( bit_send PGNSP PGUID 12 f f t f i 1 17 "1560" _null_ bit_send - _null_ ));
+DATA(insert OID = 2457 ( bit_send PGNSP PGUID 12 f f t f i 1 17 "1560" _null_ bit_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2458 ( varbit_recv PGNSP PGUID 12 f f t f i 1 1562 "2281" _null_ varbit_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2459 ( varbit_send PGNSP PGUID 12 f f t f i 1 17 "1562" _null_ varbit_send - _null_ ));
+DATA(insert OID = 2459 ( varbit_send PGNSP PGUID 12 f f t f i 1 17 "1562" _null_ varbit_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2460 ( numeric_recv PGNSP PGUID 12 f f t f i 1 1700 "2281" _null_ numeric_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2461 ( numeric_send PGNSP PGUID 12 f f t f i 1 17 "1700" _null_ numeric_send - _null_ ));
+DATA(insert OID = 2461 ( numeric_send PGNSP PGUID 12 f f t f i 1 17 "1700" _null_ numeric_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2462 ( abstimerecv PGNSP PGUID 12 f f t f i 1 702 "2281" _null_ abstimerecv - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2468 ( date_recv PGNSP PGUID 12 f f t f i 1 1082 "2281" _null_ date_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2469 ( date_send PGNSP PGUID 12 f f t f i 1 17 "1082" _null_ date_send - _null_ ));
+DATA(insert OID = 2469 ( date_send PGNSP PGUID 12 f f t f i 1 17 "1082" _null_ date_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2470 ( time_recv PGNSP PGUID 12 f f t f i 1 1083 "2281" _null_ time_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2471 ( time_send PGNSP PGUID 12 f f t f i 1 17 "1083" _null_ time_send - _null_ ));
+DATA(insert OID = 2471 ( time_send PGNSP PGUID 12 f f t f i 1 17 "1083" _null_ time_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2472 ( timetz_recv PGNSP PGUID 12 f f t f i 1 1266 "2281" _null_ timetz_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2473 ( timetz_send PGNSP PGUID 12 f f t f i 1 17 "1266" _null_ timetz_send - _null_ ));
+DATA(insert OID = 2473 ( timetz_send PGNSP PGUID 12 f f t f i 1 17 "1266" _null_ timetz_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2474 ( timestamp_recv PGNSP PGUID 12 f f t f i 1 1114 "2281" _null_ timestamp_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2475 ( timestamp_send PGNSP PGUID 12 f f t f i 1 17 "1114" _null_ timestamp_send - _null_ ));
+DATA(insert OID = 2475 ( timestamp_send PGNSP PGUID 12 f f t f i 1 17 "1114" _null_ timestamp_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2476 ( timestamptz_recv PGNSP PGUID 12 f f t f i 1 1184 "2281" _null_ timestamptz_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2477 ( timestamptz_send PGNSP PGUID 12 f f t f i 1 17 "1184" _null_ timestamptz_send - _null_ ));
+DATA(insert OID = 2477 ( timestamptz_send PGNSP PGUID 12 f f t f i 1 17 "1184" _null_ timestamptz_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2478 ( interval_recv PGNSP PGUID 12 f f t f i 1 1186 "2281" _null_ interval_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2479 ( interval_send PGNSP PGUID 12 f f t f i 1 17 "1186" _null_ interval_send - _null_ ));
+DATA(insert OID = 2479 ( interval_send PGNSP PGUID 12 f f t f i 1 17 "1186" _null_ interval_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2480 ( lseg_recv PGNSP PGUID 12 f f t f i 1 601 "2281" _null_ lseg_recv - _null_ ));
DESCR("I/O");
DESCR("I/O");
DATA(insert OID = 2500 ( cstring_recv PGNSP PGUID 12 f f t f s 1 2275 "2281" _null_ cstring_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2501 ( cstring_send PGNSP PGUID 12 f f t f s 1 17 "2275" _null_ cstring_send - _null_ ));
+DATA(insert OID = 2501 ( cstring_send PGNSP PGUID 12 f f t f s 1 17 "2275" _null_ cstring_send - _null_ ));
DESCR("I/O");
DATA(insert OID = 2502 ( anyarray_recv PGNSP PGUID 12 f f t f s 1 2277 "2281" _null_ anyarray_recv - _null_ ));
DESCR("I/O");
-DATA(insert OID = 2503 ( anyarray_send PGNSP PGUID 12 f f t f s 1 17 "2277" _null_ anyarray_send - _null_ ));
+DATA(insert OID = 2503 ( anyarray_send PGNSP PGUID 12 f f t f s 1 17 "2277" _null_ anyarray_send - _null_ ));
DESCR("I/O");
/* System-view support functions with pretty-print option */
DESCR("select statement of a view with pretty-print option");
DATA(insert OID = 2506 ( pg_get_viewdef PGNSP PGUID 12 f f t f s 2 25 "26 16" _null_ pg_get_viewdef_ext - _null_ ));
DESCR("select statement of a view with pretty-print option");
-DATA(insert OID = 2507 ( pg_get_indexdef PGNSP PGUID 12 f f t f s 3 25 "26 23 16" _null_ pg_get_indexdef_ext - _null_ ));
+DATA(insert OID = 2507 ( pg_get_indexdef PGNSP PGUID 12 f f t f s 3 25 "26 23 16" _null_ pg_get_indexdef_ext - _null_ ));
DESCR("index description (full create statement or single expression) with pretty-print option");
DATA(insert OID = 2508 ( pg_get_constraintdef PGNSP PGUID 12 f f t f s 2 25 "26 16" _null_ pg_get_constraintdef_ext - _null_ ));
DESCR("constraint description with pretty-print option");
DESCR("non-persistent series generator");
/* boolean aggregates */
-DATA(insert OID = 2515 ( booland_statefunc PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ booland_statefunc - _null_ ));
+DATA(insert OID = 2515 ( booland_statefunc PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ booland_statefunc - _null_ ));
DESCR("boolean-and aggregate transition function");
-DATA(insert OID = 2516 ( boolor_statefunc PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolor_statefunc - _null_ ));
+DATA(insert OID = 2516 ( boolor_statefunc PGNSP PGUID 12 f f t f i 2 16 "16 16" _null_ boolor_statefunc - _null_ ));
DESCR("boolean-or aggregate transition function");
-DATA(insert OID = 2517 ( bool_and PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2517 ( bool_and PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
DESCR("boolean-and aggregate");
/* ANY, SOME? These names conflict with subquery operators. See doc. */
-DATA(insert OID = 2518 ( bool_or PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2518 ( bool_or PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
DESCR("boolean-or aggregate");
-DATA(insert OID = 2519 ( every PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
+DATA(insert OID = 2519 ( every PGNSP PGUID 12 t f f f i 1 16 "16" _null_ aggregate_dummy - _null_ ));
DESCR("boolean-and aggregate");
/* bitwise integer aggregates */
-DATA(insert OID = 2236 ( bit_and PGNSP PGUID 12 t f f f i 1 21 "21" _null_ aggregate_dummy - _null_));
+DATA(insert OID = 2236 ( bit_and PGNSP PGUID 12 t f f f i 1 21 "21" _null_ aggregate_dummy - _null_));
DESCR("bitwise-and smallint aggregate");
DATA(insert OID = 2237 ( bit_or PGNSP PGUID 12 t f f f i 1 21 "21" _null_ aggregate_dummy - _null_));
DESCR("bitwise-or smallint aggregate");
-DATA(insert OID = 2238 ( bit_and PGNSP PGUID 12 t f f f i 1 23 "23" _null_ aggregate_dummy - _null_));
+DATA(insert OID = 2238 ( bit_and PGNSP PGUID 12 t f f f i 1 23 "23" _null_ aggregate_dummy - _null_));
DESCR("bitwise-and integer aggregate");
DATA(insert OID = 2239 ( bit_or PGNSP PGUID 12 t f f f i 1 23 "23" _null_ aggregate_dummy - _null_));
DESCR("bitwise-or integer aggregate");
-DATA(insert OID = 2240 ( bit_and PGNSP PGUID 12 t f f f i 1 20 "20" _null_ aggregate_dummy - _null_));
+DATA(insert OID = 2240 ( bit_and PGNSP PGUID 12 t f f f i 1 20 "20" _null_ aggregate_dummy - _null_));
DESCR("bitwise-and bigint aggregate");
DATA(insert OID = 2241 ( bit_or PGNSP PGUID 12 t f f f i 1 20 "20" _null_ aggregate_dummy - _null_));
DESCR("bitwise-or bigint aggregate");
-DATA(insert OID = 2242 ( bit_and PGNSP PGUID 12 t f f f i 1 1560 "1560" _null_ aggregate_dummy - _null_));
+DATA(insert OID = 2242 ( bit_and PGNSP PGUID 12 t f f f i 1 1560 "1560" _null_ aggregate_dummy - _null_));
DESCR("bitwise-and bit aggregate");
DATA(insert OID = 2243 ( bit_or PGNSP PGUID 12 t f f f i 1 1560 "1560" _null_ aggregate_dummy - _null_));
DESCR("bitwise-or bit aggregate");
/* formerly-missing interval + datetime operators */
-DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 f f t f i 2 1114 "1186 1082" _null_ "select $2 + $1" - _null_ ));
-DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 f f t f i 2 1266 "1186 1266" _null_ "select $2 + $1" - _null_ ));
-DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 f f t f i 2 1114 "1186 1114" _null_ "select $2 + $1" - _null_ ));
-DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 f f t f i 2 1184 "1186 1184" _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 2546 ( interval_pl_date PGNSP PGUID 14 f f t f i 2 1114 "1186 1082" _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 2547 ( interval_pl_timetz PGNSP PGUID 14 f f t f i 2 1266 "1186 1266" _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 2548 ( interval_pl_timestamp PGNSP PGUID 14 f f t f i 2 1114 "1186 1114" _null_ "select $2 + $1" - _null_ ));
+DATA(insert OID = 2549 ( interval_pl_timestamptz PGNSP PGUID 14 f f t f i 2 1184 "1186 1184" _null_ "select $2 + $1" - _null_ ));
DATA(insert OID = 2550 ( integer_pl_date PGNSP PGUID 14 f f t f i 2 1082 "23 1082" _null_ "select $2 + $1" - _null_ ));
DATA(insert OID = 2556 ( pg_tablespace_databases PGNSP PGUID 12 f f t t s 1 26 "26" _null_ pg_tablespace_databases - _null_));
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_statistic.h,v 1.26 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_statistic.h,v 1.27 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
*
* Code reading the pg_statistic relation should not assume that a particular
* data "kind" will appear in any particular slot. Instead, search the
- * stakind fields to see if the desired data is available. (The standard
+ * stakind fields to see if the desired data is available. (The standard
* function get_attstatsslot() may be used for this.)
*/
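The note above recommends get_attstatsslot() for pulling a statistics slot out of pg_statistic rather than assuming a fixed slot position. A minimal C sketch of that pattern follows; it assumes the 2004-era signatures of get_attstatsslot()/free_attstatsslot() in utils/lsyscache.h and the STATRELATT syscache, and the helper name show_mcv_count with its parameters is illustrative only, not part of this patch.

#include "postgres.h"

#include "access/attnum.h"
#include "catalog/pg_statistic.h"
#include "utils/lsyscache.h"
#include "utils/syscache.h"

/*
 * Illustrative sketch only: look up a column's statistics tuple and, if an
 * MCV (most-common-values) slot was collected, report how many entries it
 * holds.  Per the header note, we search by stakind via get_attstatsslot()
 * instead of assuming the MCV data sits in any particular slot.
 */
static void
show_mcv_count(Oid relid, AttrNumber attnum, Oid atttype, int32 atttypmod)
{
	HeapTuple	statsTuple;
	Datum	   *values;
	int			nvalues;

	statsTuple = SearchSysCache(STATRELATT,
								ObjectIdGetDatum(relid),
								Int16GetDatum(attnum),
								0, 0);
	if (!HeapTupleIsValid(statsTuple))
		return;					/* ANALYZE has not stored stats yet */

	if (get_attstatsslot(statsTuple, atttype, atttypmod,
						 STATISTIC_KIND_MCV, InvalidOid,
						 &values, &nvalues,
						 NULL, NULL))
	{
		elog(DEBUG1, "column has %d most-common values", nvalues);
		free_attstatsslot(atttype, values, nvalues, NULL, 0);
	}

	ReleaseSysCache(statsTuple);
}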
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_tablespace.h,v 1.3 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_tablespace.h,v 1.4 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
#define Anum_pg_tablespace_spclocation 3
#define Anum_pg_tablespace_spcacl 4
-DATA(insert OID = 1663 ( pg_default PGUID "" _null_ ));
+DATA(insert OID = 1663 ( pg_default PGUID "" _null_ ));
DATA(insert OID = 1664 ( pg_global PGUID "" _null_ ));
#define DEFAULTTABLESPACE_OID 1663
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.155 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/catalog/pg_type.h,v 1.156 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* the genbki.sh script reads this file and generates .bki
DESCR("array of INDEX_MAX_KEYS oids, used in system tables");
#define OIDVECTOROID 30
-DATA(insert OID = 71 ( pg_type PGNSP PGUID -1 f c t \054 1247 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 75 ( pg_attribute PGNSP PGUID -1 f c t \054 1249 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 81 ( pg_proc PGNSP PGUID -1 f c t \054 1255 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 83 ( pg_class PGNSP PGUID -1 f c t \054 1259 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 86 ( pg_shadow PGNSP PGUID -1 f c t \054 1260 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 87 ( pg_group PGNSP PGUID -1 f c t \054 1261 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
-DATA(insert OID = 88 ( pg_database PGNSP PGUID -1 f c t \054 1262 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 71 ( pg_type PGNSP PGUID -1 f c t \054 1247 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 75 ( pg_attribute PGNSP PGUID -1 f c t \054 1249 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 81 ( pg_proc PGNSP PGUID -1 f c t \054 1255 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 83 ( pg_class PGNSP PGUID -1 f c t \054 1259 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 86 ( pg_shadow PGNSP PGUID -1 f c t \054 1260 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 87 ( pg_group PGNSP PGUID -1 f c t \054 1261 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
+DATA(insert OID = 88 ( pg_database PGNSP PGUID -1 f c t \054 1262 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
DATA(insert OID = 90 ( pg_tablespace PGNSP PGUID -1 f c t \054 1213 0 record_in record_out record_recv record_send - d x f 0 -1 0 _null_ _null_ ));
/* OIDS 100 - 199 */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994-5, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.25 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/cluster.h,v 1.26 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void check_index_is_clusterable(Relation OldHeap, Oid indexOid);
extern void mark_index_clustered(Relation rel, Oid indexOid);
-extern Oid make_new_heap(Oid OIDOldHeap, const char *NewName,
- Oid NewTableSpace);
+extern Oid make_new_heap(Oid OIDOldHeap, const char *NewName,
+ Oid NewTableSpace);
extern void swap_relation_files(Oid r1, Oid r2);
#endif /* CLUSTER_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.60 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/defrem.h,v 1.61 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ReindexTable(RangeVar *relation, bool force);
extern void ReindexDatabase(const char *databaseName, bool force, bool all);
extern char *makeObjectName(const char *name1, const char *name2,
- const char *label);
+ const char *label);
extern char *ChooseRelationName(const char *name1, const char *name2,
- const char *label, Oid namespace);
+ const char *label, Oid namespace);
/* commands/functioncmds.c */
extern void CreateFunction(CreateFunctionStmt *stmt);
extern void RemoveOperator(RemoveOperStmt *stmt);
extern void RemoveOperatorById(Oid operOid);
extern void AlterOperatorOwner(List *name, TypeName *typeName1,
- TypeName *typename2, AclId newOwnerSysId);
+ TypeName *typename2, AclId newOwnerSysId);
/* commands/aggregatecmds.c */
extern void DefineAggregate(List *names, List *parameters);
extern void RemoveOpClass(RemoveOpClassStmt *stmt);
extern void RemoveOpClassById(Oid opclassOid);
extern void RenameOpClass(List *name, const char *access_method, const char *newname);
-extern void AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId);
+extern void AlterOpClassOwner(List *name, const char *access_method, AclId newOwnerSysId);
/* support routines in commands/define.c */
*
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.11 2004/08/29 04:13:05 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/prepare.h,v 1.12 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Utility statements PREPARE, EXECUTE, DEALLOCATE, EXPLAIN EXECUTE */
extern void PrepareQuery(PrepareStmt *stmt);
extern void ExecuteQuery(ExecuteStmt *stmt, DestReceiver *dest,
- char *completionTag);
+ char *completionTag);
extern void DeallocateQuery(DeallocateStmt *stmt);
extern void ExplainExecuteQuery(ExplainStmt *stmt, TupOutputState *tstate);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.18 2004/08/29 04:13:06 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/tablecmds.h,v 1.19 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void PreCommit_on_commit_actions(void);
extern void AtEOXact_on_commit_actions(bool isCommit, TransactionId xid);
extern void AtEOSubXact_on_commit_actions(bool isCommit,
- TransactionId childXid,
- TransactionId parentXid);
+ TransactionId childXid,
+ TransactionId parentXid);
#endif /* TABLECMDS_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.47 2004/08/29 04:13:06 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/trigger.h,v 1.48 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* in utils/adt/ri_triggers.c
*/
extern bool RI_FKey_keyequal_upd(TriggerData *trigdata);
-extern bool RI_Initial_Check(FkConstraint *fkconstraint,
- Relation rel,
- Relation pkrel);
+extern bool RI_Initial_Check(FkConstraint *fkconstraint,
+ Relation rel,
+ Relation pkrel);
#endif /* TRIGGER_H */
* Commands for manipulating users and groups.
*
*
- * $PostgreSQL: pgsql/src/include/commands/user.h,v 1.23 2004/07/28 14:23:31 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/commands/user.h,v 1.24 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AtEOXact_UpdatePasswordFile(bool isCommit);
extern void AtEOSubXact_UpdatePasswordFile(bool isCommit, TransactionId myXid,
- TransactionId parentXid);
+ TransactionId parentXid);
#endif /* USER_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.56 2004/08/29 04:13:06 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/commands/vacuum.h,v 1.57 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*----------
* ANALYZE builds one of these structs for each attribute (column) that is
- * to be analyzed. The struct and subsidiary data are in anl_context,
+ * to be analyzed. The struct and subsidiary data are in anl_context,
* so they live until the end of the ANALYZE operation.
*
* The type-specific typanalyze function is passed a pointer to this struct
* and must return TRUE to continue analysis, FALSE to skip analysis of this
- * column. In the TRUE case it must set the compute_stats and minrows fields,
+ * column. In the TRUE case it must set the compute_stats and minrows fields,
* and can optionally set extra_data to pass additional info to compute_stats.
* minrows is its request for the minimum number of sample rows to be gathered
* (but note this request might not be honored, eg if there are fewer rows
typedef struct VacAttrStats *VacAttrStatsP;
typedef Datum (*AnalyzeAttrFetchFunc) (VacAttrStatsP stats, int rownum,
- bool *isNull);
+ bool *isNull);
typedef struct VacAttrStats
{
MemoryContext anl_context; /* where to save long-lived data */
/*
- * These fields must be filled in by the typanalyze routine,
- * unless it returns FALSE.
+ * These fields must be filled in by the typanalyze routine, unless it
+ * returns FALSE.
*/
- void (*compute_stats) (VacAttrStatsP stats,
- AnalyzeAttrFetchFunc fetchfunc,
- int samplerows,
- double totalrows);
+ void (*compute_stats) (VacAttrStatsP stats,
+ AnalyzeAttrFetchFunc fetchfunc,
+ int samplerows,
+ double totalrows);
int minrows; /* Minimum # of rows wanted for stats */
void *extra_data; /* for extra type-specific data */
Datum *stavalues[STATISTIC_NUM_SLOTS];
/*
- * These fields are private to the main ANALYZE code and should not
- * be looked at by type-specific functions.
+ * These fields are private to the main ANALYZE code and should not be
+ * looked at by type-specific functions.
*/
int tupattnum; /* attribute number within tuples */
HeapTuple *rows; /* access info for std fetch function */
} VacRUsage;
/* Default statistics target (GUC parameter) */
-extern DLLIMPORT int default_statistics_target; /* DLLIMPORT for PostGIS */
+extern DLLIMPORT int default_statistics_target; /* DLLIMPORT for PostGIS */
/* in commands/vacuum.c */
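
A hedged sketch (not code from this patch) of the typanalyze contract described above: the routine receives the VacAttrStats pointer, fills in compute_stats and minrows, and returns true; both function names here are hypothetical.

#include "postgres.h"
#include "commands/vacuum.h"

static void
my_compute_stats(VacAttrStatsP stats, AnalyzeAttrFetchFunc fetchfunc,
				 int samplerows, double totalrows)
{
	/* examine the sample via fetchfunc(stats, rownum, &isnull) and fill
	 * in the stats slots as appropriate for the data type */
}

Datum
my_typanalyze(PG_FUNCTION_ARGS)
{
	VacAttrStats *stats = (VacAttrStats *) PG_GETARG_POINTER(0);

	stats->compute_stats = my_compute_stats;
	stats->minrows = 300 * default_statistics_target;	/* sample-size request */

	PG_RETURN_BOOL(true);
}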
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.111 2004/08/29 04:13:06 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/executor/executor.h,v 1.112 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* prototypes from functions in execMain.c
*/
extern void ExecutorStart(QueryDesc *queryDesc, bool useCurrentSnapshot,
- bool explainOnly);
+ bool explainOnly);
extern TupleTableSlot *ExecutorRun(QueryDesc *queryDesc,
ScanDirection direction, long count);
extern void ExecutorEnd(QueryDesc *queryDesc);
*
* spi.h
*
- * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.46 2004/07/31 20:55:42 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/executor/spi.h,v 1.47 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern int SPI_finish(void);
extern void SPI_push(void);
extern void SPI_pop(void);
-extern int SPI_exec(const char *src, int tcount);
-extern int SPI_execp(void *plan, Datum *values, const char *Nulls,
+extern int SPI_exec(const char *src, int tcount);
+extern int SPI_execp(void *plan, Datum *values, const char *Nulls,
int tcount);
extern int SPI_execp_current(void *plan, Datum *values, const char *Nulls,
- bool useCurrentSnapshot, int tcount);
+ bool useCurrentSnapshot, int tcount);
extern void *SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern void *SPI_saveplan(void *plan);
extern int SPI_freeplan(void *plan);
-extern Oid SPI_getargtypeid(void *plan, int argIndex);
-extern int SPI_getargcount(void *plan);
+extern Oid SPI_getargtypeid(void *plan, int argIndex);
+extern int SPI_getargcount(void *plan);
extern bool SPI_is_cursor_plan(void *plan);
extern const char *SPI_result_code_string(int code);
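
Purely as a usage sketch of the SPI prototypes above (not part of the patch): prepare a parameterized plan once and run it with SPI_execp(); the query text and argument are made up.

#include "postgres.h"
#include "catalog/pg_type.h"
#include "executor/spi.h"

static void
spi_example(int32 key)
{
	Oid		argtypes[1] = {INT4OID};
	Datum	values[1];
	void   *plan;

	SPI_connect();

	plan = SPI_prepare("SELECT 1 FROM pg_class WHERE oid = $1", 1, argtypes);
	values[0] = Int32GetDatum(key);

	/* a NULL null-flags string means "no arguments are null" */
	SPI_execp(plan, values, NULL, 0);

	SPI_finish();
}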
*
* Copyright (c) 2002-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.13 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/funcapi.h,v 1.14 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
AttInMetadata *attinmeta;
/*
- * memory context used for structures that must live for multiple calls
+ * memory context used for structures that must live for multiple
+ * calls
*
* multi_call_memory_ctx is set by SRF_FIRSTCALL_INIT() for you, and used
* by SRF_RETURN_DONE() for cleanup. It is the most appropriate memory
/*
* OPTIONAL pointer to struct containing tuple description
*
- * tuple_desc is for use when returning tuples (i.e. composite data types)
- * and is only needed if you are going to build the tuples with
- * heap_formtuple() rather than with BuildTupleFromCStrings(). Note that
- * the TupleDesc pointer stored here should usually have been run through
- * BlessTupleDesc() first.
+ * tuple_desc is for use when returning tuples (i.e. composite data
+ * types) and is only needed if you are going to build the tuples with
+ * heap_formtuple() rather than with BuildTupleFromCStrings(). Note
+ * that the TupleDesc pointer stored here should usually have been run
+ * through BlessTupleDesc() first.
*/
- TupleDesc tuple_desc;
+ TupleDesc tuple_desc;
} FuncCallContext;
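
A minimal sketch (not from this patch) of the set-returning-function protocol that FuncCallContext supports, using the standard funcapi macros; the function name and behavior are hypothetical.

#include "postgres.h"
#include "funcapi.h"

Datum countdown(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(countdown);

Datum
countdown(PG_FUNCTION_ARGS)
{
	FuncCallContext *funcctx;

	if (SRF_IS_FIRSTCALL())
	{
		MemoryContext oldcontext;

		funcctx = SRF_FIRSTCALL_INIT();

		/* state that must survive across calls belongs in this context */
		oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
		funcctx->max_calls = PG_GETARG_INT32(0);
		MemoryContextSwitchTo(oldcontext);
	}

	funcctx = SRF_PERCALL_SETUP();

	if (funcctx->call_cntr < funcctx->max_calls)
		SRF_RETURN_NEXT(funcctx,
						Int32GetDatum((int32) (funcctx->max_calls -
											   funcctx->call_cntr)));
	else
		SRF_RETURN_DONE(funcctx);
}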
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.20 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/be-fsstubs.h,v 1.21 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
extern void AtEOXact_LargeObject(bool isCommit);
extern void AtEOSubXact_LargeObject(bool isCommit, TransactionId myXid,
- TransactionId parentXid);
+ TransactionId parentXid);
#endif /* BE_FSSTUBS_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.47 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/libpq-be.h,v 1.48 2004/08/29 05:06:56 momjian Exp $
*
*-------------------------------------------------------------------------
*/
ProtocolVersion proto; /* FE/BE protocol version */
SockAddr laddr; /* local addr (postmaster) */
SockAddr raddr; /* remote addr (client) */
- char *remote_host; /* name (or ip addr) of remote host */
- char *remote_port; /* text rep of remote port */
+ char *remote_host; /* name (or ip addr) of remote host */
+ char *remote_port; /* text rep of remote port */
CAC_state canAcceptConnections; /* postmaster connection status */
/*
char cryptSalt[2]; /* Password salt */
/*
- * Information that really has no business at all being in struct Port,
- * but since it gets used by elog.c in the same way as database_name
- * and other members of this struct, we may as well keep it here.
+ * Information that really has no business at all being in struct
+ * Port, but since it gets used by elog.c in the same way as
+ * database_name and other members of this struct, we may as well keep
+ * it here.
*/
- const char *commandTag; /* current command tag */
- struct timeval session_start; /* for session duration logging */
+ const char *commandTag; /* current command tag */
+ struct timeval session_start; /* for session duration logging */
/*
* SSL structures
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/libpq/pqsignal.h,v 1.27 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/libpq/pqsignal.h,v 1.28 2004/08/29 05:06:56 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
#ifndef WIN32
#define PG_SETMASK(mask) sigsetmask(*((int*)(mask)))
#else
-#define PG_SETMASK(mask) pqsigsetmask(*((int*)(mask)))
-int pqsigsetmask(int mask);
+#define PG_SETMASK(mask) pqsigsetmask(*((int*)(mask)))
+int pqsigsetmask(int mask);
#endif
#endif
-/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.50 2004/03/15 10:41:26 ishii Exp $ */
+/* $PostgreSQL: pgsql/src/include/mb/pg_wchar.h,v 1.51 2004/08/29 05:06:56 momjian Exp $ */
#ifndef PG_WCHAR_H
#define PG_WCHAR_H
mb2wchar_with_len_converter mb2wchar_with_len; /* convert a multibyte
* string to a wchar */
mblen_converter mblen; /* returns the length of a multibyte char */
- mbdisplaylen_converter dsplen; /* returns the length of a display length */
+ mbdisplaylen_converter dsplen; /* returns the length of a
+ * display length */
int maxmblen; /* max bytes for a char in this charset */
} pg_wchar_tbl;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.166 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/miscadmin.h,v 1.167 2004/08/29 05:06:55 momjian Exp $
*
* NOTES
* some of the information in this file should be moved to other files.
ProcessInterrupts(); \
} while(0)
-#else /* WIN32 */
+#else /* WIN32 */
#define CHECK_FOR_INTERRUPTS() \
do { \
if (InterruptPending) \
ProcessInterrupts(); \
} while(0)
-
-#endif /* WIN32 */
+#endif /* WIN32 */
#define HOLD_INTERRUPTS() (InterruptHoldoffCount++)
extern char OutputFileName[];
extern char my_exec_path[];
extern char pkglib_path[];
+
#ifdef EXEC_BACKEND
extern char postgres_exec_path[];
#endif
extern int VacuumCostDelay;
extern int VacuumCostBalance;
-extern bool VacuumCostActive;
+extern bool VacuumCostActive;
/* in tcop/postgres.c */
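
Illustrative only: long-running backend loops are expected to call the CHECK_FOR_INTERRUPTS() macro shown above so pending cancel/die requests get serviced; the loop itself is hypothetical.

#include "postgres.h"
#include "miscadmin.h"

static void
process_work_items(int nitems)
{
	int		i;

	for (i = 0; i < nitems; i++)
	{
		/* cheap test of InterruptPending; calls ProcessInterrupts() if set */
		CHECK_FOR_INTERRUPTS();

		/* ... per-item work would go here ... */
	}
}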
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.118 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/execnodes.h,v 1.119 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Basic state for all query types: */
ScanDirection es_direction; /* current scan direction */
Snapshot es_snapshot; /* time qual to use */
- Snapshot es_crosscheck_snapshot; /* crosscheck time qual for RI */
+ Snapshot es_crosscheck_snapshot; /* crosscheck time qual for RI */
List *es_range_table; /* List of RangeTableEntrys */
/* Info about target table for insert/update/delete queries: */
List *es_rowMark; /* not good place, but there is no other */
bool es_instrument; /* true requests runtime instrumentation */
- bool es_select_into; /* true if doing SELECT INTO */
+ bool es_select_into; /* true if doing SELECT INTO */
bool es_into_oids; /* true to generate OIDs in SELECT INTO */
List *es_exprcontexts; /* List of ExprContexts within EState */
typedef struct ExprState ExprState;
typedef Datum (*ExprStateEvalFunc) (ExprState *expression,
- ExprContext *econtext,
- bool *isNull,
- ExprDoneCond *isDone);
+ ExprContext *econtext,
+ bool *isNull,
+ ExprDoneCond *isDone);
struct ExprState
{
NodeTag type;
Expr *expr; /* associated Expr node */
- ExprStateEvalFunc evalfunc; /* routine to run to execute node */
+ ExprStateEvalFunc evalfunc; /* routine to run to execute node */
};
/* ----------------
/*
* Flag to remember whether we have registered a shutdown callback for
- * this FuncExprState. We do so only if setArgsValid has been true at
- * least once (since all the callback is for is to clear setArgsValid).
+ * this FuncExprState. We do so only if setArgsValid has been true at
+ * least once (since all the callback is for is to clear
+ * setArgsValid).
*/
bool shutdown_reg; /* a shutdown callback is registered */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.26 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/params.h,v 1.27 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* kind : the kind of parameter (PARAM_NAMED or PARAM_NUM)
* name : the parameter name (valid if kind == PARAM_NAMED)
- * id : the parameter id (valid if kind == PARAM_NUM)
+ * id : the parameter id (valid if kind == PARAM_NUM)
* ptype : the type of the parameter value
* isnull : true if the value is null (if so 'value' is undefined)
* value : the value that has to be substituted in the place
/* Functions found in src/backend/nodes/params.c */
extern ParamListInfo copyParamList(ParamListInfo from);
extern ParamListInfo lookupParam(ParamListInfo paramList, int thisParamKind,
- const char *thisParamName, AttrNumber thisParamId,
- bool noError);
+ const char *thisParamName, AttrNumber thisParamId,
+ bool noError);
#endif /* PARAMS_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.268 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/parsenodes.h,v 1.269 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* Note: as of Postgres 8.0, we don't support arrays of composite values,
* so cases in which a field select follows a subscript aren't actually
- * semantically legal. However the parser is prepared to handle such.
+ * semantically legal. However the parser is prepared to handle such.
*/
typedef struct A_Indirection
{
*
* In RELATION RTEs, the colnames in both alias and eref are indexed by
* physical attribute number; this means there must be colname entries for
- * dropped columns. When building an RTE we insert empty strings ("") for
- * dropped columns. Note however that a stored rule may have nonempty
+ * dropped columns. When building an RTE we insert empty strings ("") for
+ * dropped columns. Note however that a stored rule may have nonempty
* colnames for columns dropped since the rule was created (and for that
* matter the colnames might be out of date due to column renamings).
* The same comments apply to FUNCTION RTEs when the function's return type
*
* In JOIN RTEs, the colnames in both alias and eref are one-to-one with
* joinaliasvars entries. A JOIN RTE will omit columns of its inputs when
- * those columns are known to be dropped at parse time. Again, however,
+ * those columns are known to be dropped at parse time. Again, however,
* a stored rule might contain entries for columns dropped since the rule
- * was created. (This is only possible for columns not actually referenced
+ * was created. (This is only possible for columns not actually referenced
* in the rule.)
*
* inh is TRUE for relation references that should be expanded to include
* expansion of '*'.
*
* requiredPerms and checkAsUser specify run-time access permissions
- * checks to be performed at query startup. The user must have *all*
+ * checks to be performed at query startup. The user must have *all*
* of the permissions that are OR'd together in requiredPerms (zero
* indicates no permissions checking). If checkAsUser is not zero,
* then do the permissions checks using the access rights of that user,
MUST_NOT_HAVE_OIDS, /* WITHOUT OIDS explicitly specified */
DEFAULT_OIDS /* neither specified; use the default,
* which is the value of the
- * default_with_oids GUC var
- */
+ * default_with_oids GUC var */
} ContainsOids;
typedef struct SelectStmt
/*
* These fields are used only in "leaf" SelectStmts.
*
- * into, intoColNames and intoHasOids are a kluge; they belong
- * somewhere else...
+ * into, intoColNames and intoHasOids are a kluge; they belong somewhere
+ * else...
*/
List *distinctClause; /* NULL, list of DISTINCT ON exprs, or
* lcons(NIL,NIL) for all (SELECT
NodeTag type;
char *schemaname; /* the name of the schema to create */
char *authid; /* the owner of the created schema */
- char *tablespacename; /* default tablespace for schema, or NULL */
+ char *tablespacename; /* default tablespace for schema, or NULL */
List *schemaElts; /* schema components (list of parsenodes) */
} CreateSchemaStmt;
AT_AddIndex, /* add index */
AT_ReAddIndex, /* internal to commands/tablecmds.c */
AT_AddConstraint, /* add constraint */
- AT_ProcessedConstraint, /* pre-processed add constraint
- * (local in parser/analyze.c) */
+ AT_ProcessedConstraint, /* pre-processed add constraint (local in
+ * parser/analyze.c) */
AT_DropConstraint, /* drop constraint */
AT_DropConstraintQuietly, /* drop constraint, no error/warning
* (local in commands/tablecmds.c) */
List *inhRelations; /* relations to inherit from (list of
* inhRelation) */
List *constraints; /* constraints (list of Constraint nodes) */
- ContainsOids hasoids; /* should it have OIDs? */
+ ContainsOids hasoids; /* should it have OIDs? */
OnCommitAction oncommit; /* what do we do at COMMIT? */
- char *tablespacename; /* table space to use, or NULL */
+ char *tablespacename; /* table space to use, or NULL */
} CreateStmt;
/* ----------
/* ----------------------
- * Create/Drop Table Space Statements
+ * Create/Drop Table Space Statements
* ----------------------
*/
} RenameStmt;
/* ----------------------
- * Alter Object Owner Statement
+ * Alter Object Owner Statement
* ----------------------
*/
typedef struct AlterOwnerStmt
NodeTag type;
List *relations; /* relations to lock */
int mode; /* lock mode */
- bool nowait; /* no wait mode */
+ bool nowait; /* no wait mode */
} LockStmt;
/* ----------------------
* We support three types of lists:
*
* T_List: lists of pointers
- * (in practice usually pointers to Nodes, but not always;
- * declared as "void *" to minimize casting annoyances)
+ * (in practice usually pointers to Nodes, but not always;
+ * declared as "void *" to minimize casting annoyances)
* T_IntList: lists of integers
* T_OidList: lists of Oids
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/pg_list.h,v 1.48 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/pg_list.h,v 1.49 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct List
{
- NodeTag type; /* T_List, T_IntList, or T_OidList */
- int length;
- ListCell *head;
- ListCell *tail;
+ NodeTag type; /* T_List, T_IntList, or T_OidList */
+ int length;
+ ListCell *head;
+ ListCell *tail;
} List;
struct ListCell
{
union
{
- void *ptr_value;
- int int_value;
- Oid oid_value;
- } data;
- ListCell *next;
+ void *ptr_value;
+ int int_value;
+ Oid oid_value;
+ } data;
+ ListCell *next;
};
/*
extern ListCell *list_head(List *l);
extern ListCell *list_tail(List *l);
-extern int list_length(List *l);
-
-#endif /* __GNUC__ */
+extern int list_length(List *l);
+#endif /* __GNUC__ */
/*
* NB: There is an unfortunate legacy from a previous incarnation of
#define list_make1_int(x1) lcons_int(x1, NIL)
#define list_make2_int(x1,x2) lcons_int(x1, list_make1_int(x2))
#define list_make3_int(x1,x2,x3) lcons_int(x1, list_make2_int(x2, x3))
-#define list_make4_int(x1,x2,x3,x4) lcons_int(x1, list_make3_int(x2, x3, x4))
+#define list_make4_int(x1,x2,x3,x4) lcons_int(x1, list_make3_int(x2, x3, x4))
#define list_make1_oid(x1) lcons_oid(x1, NIL)
#define list_make2_oid(x1,x2) lcons_oid(x1, list_make1_oid(x2))
#define list_make3_oid(x1,x2,x3) lcons_oid(x1, list_make2_oid(x2, x3))
-#define list_make4_oid(x1,x2,x3,x4) lcons_oid(x1, list_make3_oid(x2, x3, x4))
+#define list_make4_oid(x1,x2,x3,x4) lcons_oid(x1, list_make3_oid(x2, x3, x4))
/*
* foreach -
/*
* forboth -
- * a convenience macro for advancing through two linked lists
- * simultaneously. This macro loops through both lists at the same
- * time, stopping when either list runs out of elements. Depending
- * on the requirements of the call site, it may also be wise to
- * assert that the lengths of the two lists are equal.
+ * a convenience macro for advancing through two linked lists
+ * simultaneously. This macro loops through both lists at the same
+ * time, stopping when either list runs out of elements. Depending
+ * on the requirements of the call site, it may also be wise to
+ * assert that the lengths of the two lists are equal.
*/
#define forboth(cell1, list1, cell2, list2) \
for ((cell1) = list_head(list1), (cell2) = list_head(list2); \
extern List *list_truncate(List *list, int new_size);
extern void *list_nth(List *list, int n);
-extern int list_nth_int(List *list, int n);
-extern Oid list_nth_oid(List *list, int n);
+extern int list_nth_int(List *list, int n);
+extern Oid list_nth_oid(List *list, int n);
extern bool list_member(List *list, void *datum);
extern bool list_member_ptr(List *list, void *datum);
#define listCopy(list) list_copy(list)
-extern int length(List *list);
-
-#endif /* ENABLE_LIST_COMPAT */
+extern int length(List *list);
+#endif /* ENABLE_LIST_COMPAT */
#endif /* PG_LIST_H */
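
A small usage sketch (not part of the patch) of the forboth() macro documented above, pairing a list of column-name Values with a parallel list of type OIDs; the helper is hypothetical.

#include "postgres.h"
#include "nodes/pg_list.h"
#include "nodes/value.h"

static void
report_columns(List *colnames, List *coltypes)
{
	ListCell   *lc1;
	ListCell   *lc2;

	/* forboth stops at the shorter list, so verify the lengths match */
	Assert(list_length(colnames) == list_length(coltypes));

	forboth(lc1, colnames, lc2, coltypes)
	{
		char   *name = strVal(lfirst(lc1));
		Oid		typid = lfirst_oid(lc2);

		elog(DEBUG1, "column \"%s\" has type %u", name, typid);
	}
}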
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.74 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/plannodes.h,v 1.75 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
List *indxstrategy; /* list of sublists of strategy numbers */
List *indxsubtype; /* list of sublists of strategy subtypes */
List *indxlossy; /* list of sublists of lossy flags (ints) */
- ScanDirection indxorderdir; /* forward or backward or don't care */
+ ScanDirection indxorderdir; /* forward or backward or don't care */
} IndexScan;
/* ----------------
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.103 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/primnodes.h,v 1.104 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Notes:
*
* In a SELECT's targetlist, resno should always be equal to the item's
- * ordinal position (counting from 1). However, in an INSERT or UPDATE
+ * ordinal position (counting from 1). However, in an INSERT or UPDATE
* targetlist, resno represents the attribute number of the destination
* column for the item; so there may be missing or out-of-order resnos.
* It is even legal to have duplicated resnos; consider
* UPDATE table SET arraycol[1] = ..., arraycol[2] = ..., ...
* The two meanings come together in the executor, because the planner
* transforms INSERT/UPDATE tlists into a normalized form with exactly
- * one entry for each column of the destination table. Before that's
+ * one entry for each column of the destination table. Before that's
* happened, however, it is risky to assume that resno == position.
* Generally get_tle_by_resno() should be used rather than list_nth()
* to fetch tlist entries by resno, and only in SELECT should you assume
* resname is required to represent the correct column name in non-resjunk
* entries of top-level SELECT targetlists, since it will be used as the
* column title sent to the frontend. In most other contexts it is only
- * a debugging aid, and may be wrong or even NULL. (In particular, it may
+ * a debugging aid, and may be wrong or even NULL. (In particular, it may
* be wrong in a tlist from a stored rule, if the referenced column has been
- * renamed by ALTER TABLE since the rule was made. Also, the planner tends
+ * renamed by ALTER TABLE since the rule was made. Also, the planner tends
* to store NULL rather than look up a valid name for tlist entries in
* non-toplevel plan nodes.) In resjunk entries, resname should be either
* a specific system-generated name (such as "ctid") or NULL; anything else
*
* Note: colnames is a list of Value nodes (always strings). In Alias structs
* associated with RTEs, there may be entries corresponding to dropped
- * columns; these are normally empty strings (""). See parsenodes.h for info.
+ * columns; these are normally empty strings (""). See parsenodes.h for info.
*/
typedef struct Alias
{
* portion of a column.
*
* A single FieldStore can actually represent updates of several different
- * fields. The parser only generates FieldStores with single-element lists,
+ * fields. The parser only generates FieldStores with single-element lists,
* but the planner will collapse multiple updates of the same base column
* into one FieldStore.
* ----------------
* and the testexpr in the second case.
*
* In the raw grammar output for the second form, the condition expressions
- * of the WHEN clauses are just the comparison values. Parse analysis
+ * of the WHEN clauses are just the comparison values. Parse analysis
* converts these to valid boolean expressions of the form
* CaseTestExpr '=' compexpr
* where the CaseTestExpr node is a placeholder that emits the correct
*
* Note: the list of fields must have a one-for-one correspondence with
* physical fields of the associated rowtype, although it is okay for it
- * to be shorter than the rowtype. That is, the N'th list element must
+ * to be shorter than the rowtype. That is, the N'th list element must
* match up with the N'th physical field. When the N'th physical field
* is a dropped column (attisdropped) then the N'th list element can just
- * be a NULL constant. (This case can only occur for named composite types,
+ * be a NULL constant. (This case can only occur for named composite types,
* not RECORD types, since those are built from the RowExpr itself rather
* than vice versa.) It is important not to assume that length(args) is
* the same as the number of columns logically present in the rowtype.
Expr xpr;
List *args; /* the fields */
Oid row_typeid; /* RECORDOID or a composite type's ID */
+
/*
* Note: we deliberately do NOT store a typmod. Although a typmod
* will be associated with specific RECORD types at runtime, it will
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.98 2004/08/29 04:13:07 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/relation.h,v 1.99 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* The indexprs and indpred expressions have been run through
* prepqual.c and eval_const_expressions() for ease of matching to
- * WHERE clauses. indpred is in implicit-AND form.
+ * WHERE clauses. indpred is in implicit-AND form.
*/
typedef struct IndexOptInfo
*
* This is unlike the other Path nodes in that it can actually generate
* different plans: either hash-based or sort-based implementation, or a
- * no-op if the input path can be proven distinct already. The decision
+ * no-op if the input path can be proven distinct already. The decision
* is sufficiently localized that it's not worth having separate Path node
* types. (Note: in the no-op case, we could eliminate the UniquePath node
* entirely and just return the subpath; but it's convenient to have a
* because they used no other rels. That's what the is_pushed_down flag is
* for; it tells us that a qual came from a point above the join of the
* specific set of base rels that it uses (or that the JoinInfo structures
- * claim it uses). A clause that originally came from WHERE will *always*
+ * claim it uses). A clause that originally came from WHERE will *always*
* have its is_pushed_down flag set; a clause that came from an INNER JOIN
* condition, but doesn't use all the rels being joined, will also have
* is_pushed_down set because it will get attached to some lower joinrel.
Expr *clause; /* the represented clause of WHERE or JOIN */
- bool is_pushed_down; /* TRUE if clause was pushed down in level */
+ bool is_pushed_down; /* TRUE if clause was pushed down in level */
- bool valid_everywhere; /* TRUE if valid on every level */
+ bool valid_everywhere; /* TRUE if valid on every level */
/*
* This flag is set true if the clause looks potentially useful as a
* merge or hash join clause, that is if it is a binary opclause with
- * nonoverlapping sets of relids referenced in the left and right sides.
- * (Whether the operator is actually merge or hash joinable isn't
- * checked, however.)
+ * nonoverlapping sets of relids referenced in the left and right
+ * sides. (Whether the operator is actually merge or hash joinable
+ * isn't checked, however.)
*/
bool can_join;
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/nodes/value.h,v 1.2 2004/07/12 01:00:45 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/nodes/value.h,v 1.3 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Value *makeString(char *str);
extern Value *makeBitString(char *str);
-#endif /* VALUE_H */
+#endif /* VALUE_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.36 2004/08/29 04:13:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/geqo.h,v 1.37 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*
* If you change these, update backend/utils/misc/postgresql.conf.sample
*/
-extern int Geqo_effort; /* 1 .. 10, knob for adjustment of defaults */
+extern int Geqo_effort; /* 1 .. 10, knob for adjustment of
+ * defaults */
#define DEFAULT_GEQO_EFFORT 5
#define MIN_GEQO_EFFORT 1
/* routines in geqo_eval.c */
extern Cost geqo_eval(Gene *tour, int num_gene, GeqoEvalData *evaldata);
extern RelOptInfo *gimme_tree(Gene *tour, int num_gene,
- GeqoEvalData *evaldata);
+ GeqoEvalData *evaldata);
#endif /* GEQO_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.74 2004/08/29 04:13:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/paths.h,v 1.75 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Path *best_inner_indexscan(Query *root, RelOptInfo *rel,
Relids outer_relids, JoinType jointype);
extern List *group_clauses_by_indexkey_for_or(RelOptInfo *rel,
- IndexOptInfo *index,
- Expr *orsubclause);
+ IndexOptInfo *index,
+ Expr *orsubclause);
extern List *expand_indexqual_conditions(IndexOptInfo *index,
List *clausegroups);
extern void check_partial_indexes(Query *root, RelOptInfo *rel);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.30 2004/08/29 04:13:08 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/planner.h,v 1.31 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "nodes/plannodes.h"
-extern ParamListInfo PlannerBoundParamList; /* current boundParams */
+extern ParamListInfo PlannerBoundParamList; /* current boundParams */
extern Plan *planner(Query *parse, bool isCursor, int cursorOptions,
- ParamListInfo boundParams);
+ ParamListInfo boundParams);
extern Plan *subquery_planner(Query *parse, double tuple_fraction);
#endif /* PLANNER_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.24 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/optimizer/restrictinfo.h,v 1.25 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "nodes/relation.h"
extern RestrictInfo *make_restrictinfo(Expr *clause, bool is_pushed_down,
- bool valid_everywhere);
+ bool valid_everywhere);
extern List *make_restrictinfo_from_indexclauses(List *indexclauses,
- bool is_pushed_down,
- bool valid_everywhere);
+ bool is_pushed_down,
+ bool valid_everywhere);
extern bool restriction_is_or_clause(RestrictInfo *restrictinfo);
extern List *get_actual_clauses(List *restrictinfo_list);
extern void get_actual_join_clauses(List *restrictinfo_list,
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.40 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_node.h,v 1.41 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern ParseState *make_parsestate(ParseState *parentParseState);
extern Var *make_var(ParseState *pstate, RangeTblEntry *rte, int attrno);
-extern Oid transformArrayType(Oid arrayType);
+extern Oid transformArrayType(Oid arrayType);
extern ArrayRef *transformArraySubscripts(ParseState *pstate,
Node *arrayBase,
Oid arrayType,
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parse_relation.h,v 1.46 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parse_relation.h,v 1.47 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
RangeTblEntry *rte,
int *sublevels_up);
extern RangeTblEntry *GetRTEByRangeTablePosn(ParseState *pstate,
- int varno,
- int sublevels_up);
+ int varno,
+ int sublevels_up);
extern List *GetLevelNRangeTable(ParseState *pstate, int sublevels_up);
extern Node *scanRTEForColumn(ParseState *pstate, RangeTblEntry *rte,
- char *colname);
+ char *colname);
extern Node *colNameToVar(ParseState *pstate, char *colname, bool localonly);
extern Node *qualifiedNameToVar(ParseState *pstate,
char *schemaname,
bool addToJoinList, bool addToNameSpace);
extern RangeTblEntry *addImplicitRTE(ParseState *pstate, RangeVar *relation);
extern void expandRTE(List *rtable, int rtindex, int sublevels_up,
- bool include_dropped,
- List **colnames, List **colvars);
+ bool include_dropped,
+ List **colnames, List **colvars);
extern List *expandRelAttrs(ParseState *pstate, List *rtable,
- int rtindex, int sublevels_up);
+ int rtindex, int sublevels_up);
extern int attnameAttNum(Relation rd, const char *attname, bool sysColOK);
extern Name attnumAttName(Relation rd, int attid);
extern Oid attnumTypeId(Relation rd, int attid);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/parsetree.h,v 1.27 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/parsetree.h,v 1.28 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* get_rte_attribute_type will fail on such an attr)
*/
extern bool get_rte_attribute_is_dropped(List *rtable, int rtindex,
- AttrNumber attnum);
+ AttrNumber attnum);
/* ----------------
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/parser/scansup.h,v 1.16 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/parser/scansup.h,v 1.17 2004/08/29 05:06:57 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern char *scanstr(const char *s);
extern char *downcase_truncate_identifier(const char *ident, int len,
- bool warn);
+ bool warn);
extern void truncate_identifier(char *ident, int len, bool warn);
* for developers. If you edit any of these, be sure to do a *full*
* rebuild (and an initdb if noted).
*
- * $PostgreSQL: pgsql/src/include/pg_config_manual.h,v 1.13 2004/05/21 05:08:03 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/pg_config_manual.h,v 1.14 2004/08/29 05:06:55 momjian Exp $
*------------------------------------------------------------------------
*/
#define RELSEG_SIZE (0x40000000 / BLCKSZ)
/*
- * XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
+ * XLOG_SEG_SIZE is the size of a single WAL file. This must be a power of 2
* and larger than BLCKSZ (preferably, a great deal larger than BLCKSZ).
*
* Changing XLOG_SEG_SIZE requires an initdb.
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/include/pgtime.h,v 1.3 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/pgtime.h,v 1.4 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef int64 pg_time_t;
-struct pg_tm {
- int tm_sec;
- int tm_min;
- int tm_hour;
- int tm_mday;
- int tm_mon; /* origin 0, not 1 */
- int tm_year; /* relative to 1900 */
- int tm_wday;
- int tm_yday;
- int tm_isdst;
- long int tm_gmtoff;
+struct pg_tm
+{
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon; /* origin 0, not 1 */
+ int tm_year; /* relative to 1900 */
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
const char *tm_zone;
};
extern struct pg_tm *pg_gmtime(const pg_time_t *);
extern bool pg_tzset(const char *tzname);
extern size_t pg_strftime(char *s, size_t max, const char *format,
- const struct pg_tm *tm);
+ const struct pg_tm * tm);
extern void pg_timezone_initialize(void);
extern bool tz_acceptable(void);
extern const char *select_default_timezone(void);
extern const char *pg_get_current_timezone(void);
-#endif /* _PGTIME_H */
+#endif /* _PGTIME_H */
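
As a hedged example (not in the patch) of the pg_tm / pg_strftime interface above, assuming strftime-style format codes:

#include "postgres.h"
#include "pgtime.h"

static void
log_utc_time(pg_time_t t)
{
	struct pg_tm *tm = pg_gmtime(&t);
	char		buf[128];

	/* pg_strftime works like strftime() but takes a struct pg_tm */
	pg_strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", tm);
	elog(LOG, "UTC time is %s", buf);
}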
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/port.h,v 1.55 2004/08/29 04:13:03 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/port.h,v 1.56 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
/* non-blocking */
-bool set_noblock(int sock);
+bool set_noblock(int sock);
/* Portable path handling for Unix/Win32 */
/*
* is_absolute_path
*
- * This capability is needed by libpq and initdb.c
+ * This capability is needed by libpq and initdb.c
* On Win32, you can't reference the same object file that is
* in two different libraries (pgport and libpq), so a macro is best.
*/
/* Portable way to find binaries */
-extern int find_my_exec(const char *argv0, char *retpath);
+extern int find_my_exec(const char *argv0, char *retpath);
extern int find_other_exec(const char *argv0, const char *target,
- const char *versionstr, char *retpath);
+ const char *versionstr, char *retpath);
+
#if defined(WIN32) || defined(__CYGWIN__)
#define EXE ".exe"
#define DEVNULL "nul"
#endif
#ifdef WIN32
-#define HOMEDIR "USERPROFILE"
+#define HOMEDIR "USERPROFILE"
#else
-#define HOMEDIR "HOME"
+#define HOMEDIR "HOME"
#endif
/* Portable delay handling */
#define piperead(a,b,c) read(a,b,c)
#define pipewrite(a,b,c) write(a,b,c)
#else
-extern int pgpipe(int handles[2]);
-extern int piperead(int s, char* buf, int len);
+extern int pgpipe(int handles[2]);
+extern int piperead(int s, char *buf, int len);
+
#define pipewrite(a,b,c) send(a,b,c,0)
#define PG_SIGNAL_COUNT 32
-#define kill(pid,sig) pgkill(pid,sig)
-extern int pgkill(int pid, int sig);
+#define kill(pid,sig) pgkill(pid,sig)
+extern int pgkill(int pid, int sig);
#endif
-extern int pclose_check(FILE *stream);
+extern int pclose_check(FILE *stream);
#if defined(WIN32) || defined(__CYGWIN__)
/*
extern int pgrename(const char *from, const char *to);
extern int pgunlink(const char *path);
extern int pgsymlink(const char *oldpath, const char *newpath);
+
#define rename(from, to) pgrename(from, to)
#define unlink(path) pgunlink(path)
#define symlink(oldpath, newpath) pgsymlink(oldpath, newpath)
-
#endif
extern bool rmtree(char *path, bool rmtopdir);
/* open() replacement to allow delete of held files */
#if !defined(_MSC_VER) && !defined(__BORLANDC__)
-extern int win32_open(const char*,int,...);
-#define open(a,b,...) win32_open(a,b,##__VA_ARGS__)
+extern int win32_open(const char *, int,...);
+
+#define open(a,b,...) win32_open(a,b,##__VA_ARGS__)
#endif
#ifndef __BORLANDC__
extern int copydir(char *fromdir, char *todir);
/* Missing rand functions */
-extern long lrand48(void);
-extern void srand48(long seed);
+extern long lrand48(void);
+extern void srand48(long seed);
/* Last parameter not used */
extern int gettimeofday(struct timeval * tp, struct timezone * tzp);
#ifndef WIN32
extern int pqGetpwuid(uid_t uid, struct passwd * resultbuf, char *buffer,
- size_t buflen, struct passwd **result);
+ size_t buflen, struct passwd ** result);
#endif
extern int pqGethostbyname(const char *name,
- struct hostent *resultbuf,
+ struct hostent * resultbuf,
char *buffer, size_t buflen,
- struct hostent **result,
+ struct hostent ** result,
int *herrno);
-
#ifdef MIPSEB
#define BYTE_ORDER BIG_ENDIAN
#endif
+
#endif
#endif
#ifndef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
+
#endif
#endif
#ifndef BYTE_ORDER
#define BYTE_ORDER LITTLE_ENDIAN
+
#endif
-/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.28 2004/08/29 00:38:03 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/include/port/win32.h,v 1.29 2004/08/29 05:06:57 momjian Exp $ */
/* undefine and redefine after #include */
#undef mkdir
#define DLLIMPORT __declspec (dllimport)
#endif
-#elif defined(WIN32) && (defined(_MSC_VER) || defined(__BORLANDC__)) /* not CYGWIN or MingW */
+#elif defined(WIN32) && (defined(_MSC_VER) || defined(__BORLANDC__)) /* not CYGWIN or MingW */
#if defined(_DLL)
#define DLLIMPORT __declspec (dllexport)
/* In backend/port/win32/signal.c */
-void pgwin32_signal_initialize(void);
+void pgwin32_signal_initialize(void);
extern DLLIMPORT HANDLE pgwin32_signal_event;
-void pgwin32_dispatch_queued_signals(void);
-void pg_queue_signal(int signum);
+void pgwin32_dispatch_queued_signals(void);
+void pg_queue_signal(int signum);
#define sigmask(sig) ( 1 << (sig-1) )
#ifndef FRONTEND
#define pg_usleep(t) pgwin32_backend_usleep(t)
-void pgwin32_backend_usleep(long microsec);
+void pgwin32_backend_usleep(long microsec);
#endif
/* In backend/port/win32/socket.c */
#define recv(s, buf, len, flags) pgwin32_recv(s, buf, len, flags)
#define send(s, buf, len, flags) pgwin32_send(s, buf, len, flags)
-SOCKET pgwin32_socket(int af, int type, int protocol);
-SOCKET pgwin32_accept(SOCKET s, struct sockaddr* addr, int* addrlen);
-int pgwin32_connect(SOCKET s, const struct sockaddr* name, int namelen);
-int pgwin32_select(int nfds, fd_set* readfs, fd_set* writefds, fd_set* exceptfds, const struct timeval* timeout);
-int pgwin32_recv(SOCKET s, char* buf, int len, int flags);
-int pgwin32_send(SOCKET s, char* buf, int len, int flags);
+SOCKET pgwin32_socket(int af, int type, int protocol);
+SOCKET pgwin32_accept(SOCKET s, struct sockaddr * addr, int *addrlen);
+int pgwin32_connect(SOCKET s, const struct sockaddr * name, int namelen);
+int pgwin32_select(int nfds, fd_set *readfs, fd_set *writefds, fd_set *exceptfds, const struct timeval * timeout);
+int pgwin32_recv(SOCKET s, char *buf, int len, int flags);
+int pgwin32_send(SOCKET s, char *buf, int len, int flags);
const char *pgwin32_socket_strerror(int err);
/* in backend/port/win32/security.c */
-extern int pgwin32_is_admin(void);
-extern int pgwin32_is_service(void);
+extern int pgwin32_is_admin(void);
+extern int pgwin32_is_service(void);
#endif
/* in backend/port/win32/error.c */
-void _dosmaperr(unsigned long);
+void _dosmaperr(unsigned long);
-#define WEXITSTATUS(w) (((w) >> 8) & 0xff)
-#define WIFEXITED(w) (((w) & 0xff) == 0)
-#define WIFSIGNALED(w) (((w) & 0x7f) > 0 && (((w) & 0x7f) < 0x7f))
-#define WTERMSIG(w) ((w) & 0x7f)
+#define WEXITSTATUS(w) (((w) >> 8) & 0xff)
+#define WIFEXITED(w) (((w) & 0xff) == 0)
+#define WIFSIGNALED(w) (((w) & 0x7f) > 0 && (((w) & 0x7f) < 0x7f))
+#define WTERMSIG(w) ((w) & 0x7f)
/* Some extra signals */
#define SIGHUP 1
/* for setitimer in backend/port/win32/timer.c */
#define ITIMER_REAL 0
-struct itimerval {
+struct itimerval
+{
struct timeval it_interval;
struct timeval it_value;
};
-int setitimer(int which, const struct itimerval *value, struct itimerval *ovalue);
+int setitimer(int which, const struct itimerval * value, struct itimerval * ovalue);
/* FROM SRA */
#define ECONNREFUSED WSAECONNREFUSED
#define EBADFD WSAENOTSOCK
#define EOPNOTSUPP WSAEOPNOTSUPP
-
* use header files that are otherwise internal to Postgres to interface
* with the backend.
*
- * $PostgreSQL: pgsql/src/include/postgres_ext.h,v 1.15 2004/03/21 22:29:11 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postgres_ext.h,v 1.16 2004/08/29 05:06:55 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
#define PG_DIAG_SEVERITY 'S'
#define PG_DIAG_SQLSTATE 'C'
-#define PG_DIAG_MESSAGE_PRIMARY 'M'
+#define PG_DIAG_MESSAGE_PRIMARY 'M'
#define PG_DIAG_MESSAGE_DETAIL 'D'
#define PG_DIAG_MESSAGE_HINT 'H'
#define PG_DIAG_STATEMENT_POSITION 'P'
#define PG_DIAG_CONTEXT 'W'
#define PG_DIAG_SOURCE_FILE 'F'
#define PG_DIAG_SOURCE_LINE 'L'
-#define PG_DIAG_SOURCE_FUNCTION 'R'
+#define PG_DIAG_SOURCE_FUNCTION 'R'
#endif
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/postmaster/postmaster.h,v 1.6 2004/08/29 04:13:09 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/postmaster.h,v 1.7 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern int PostmasterMain(int argc, char *argv[]);
extern void ClosePostmasterPorts(bool am_syslogger);
+
#ifdef EXEC_BACKEND
extern pid_t postmaster_forkexec(int argc, char *argv[]);
extern int SubPostmasterMain(int argc, char *argv[]);
*
* Copyright (c) 2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.1 2004/08/05 23:32:12 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/postmaster/syslogger.h,v 1.2 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool am_syslogger;
#ifndef WIN32
-extern int syslogPipe[2];
+extern int syslogPipe[2];
+
#else
extern HANDLE syslogPipe[2];
#endif
-extern int SysLogger_Start(void);
+extern int SysLogger_Start(void);
extern void write_syslogger_file(const char *buffer, int count);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/rewrite/rewriteManip.h,v 1.37 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/rewrite/rewriteManip.h,v 1.38 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool checkExprHasSubLink(Node *node);
extern Node *ResolveNew(Node *node, int target_varno, int sublevels_up,
- List *target_rtable,
- List *targetlist, int event, int update_varno);
+ List *target_rtable,
+ List *targetlist, int event, int update_varno);
#endif /* REWRITEMANIP_H */
*
* buf_internals.h
* Internal definitions for buffer manager and the buffer replacement
- * strategy.
+ * strategy.
*
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.72 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.73 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Flags for buffer descriptors
*/
-#define BM_DIRTY (1 << 0) /* data needs writing */
-#define BM_VALID (1 << 1) /* data is valid */
-#define BM_IO_IN_PROGRESS (1 << 2) /* read or write in progress */
-#define BM_IO_ERROR (1 << 3) /* previous I/O failed */
-#define BM_JUST_DIRTIED (1 << 4) /* dirtied since write started */
-#define BM_PIN_COUNT_WAITER (1 << 5) /* have waiter for sole pin */
+#define BM_DIRTY (1 << 0) /* data needs writing */
+#define BM_VALID (1 << 1) /* data is valid */
+#define BM_IO_IN_PROGRESS (1 << 2) /* read or write in
+ * progress */
+#define BM_IO_ERROR (1 << 3) /* previous I/O failed */
+#define BM_JUST_DIRTIED (1 << 4) /* dirtied since write
+ * started */
+#define BM_PIN_COUNT_WAITER (1 << 5) /* have waiter for sole
+ * pin */
typedef bits16 BufFlags;
*/
typedef struct
{
- int prev; /* list links */
- int next;
- short list; /* ID of list it is currently in */
- bool t1_vacuum; /* t => present only because of VACUUM */
- TransactionId t1_xid; /* the xid this entry went onto T1 */
- BufferTag buf_tag; /* page identifier */
- int buf_id; /* currently assigned data buffer, or -1 */
+ int prev; /* list links */
+ int next;
+ short list; /* ID of list it is currently in */
+ bool t1_vacuum; /* t => present only because of VACUUM */
+ TransactionId t1_xid; /* the xid this entry went onto T1 */
+ BufferTag buf_tag; /* page identifier */
+ int buf_id; /* currently assigned data buffer, or -1 */
} BufferStrategyCDB;
/*
*/
typedef struct
{
- int target_T1_size; /* What T1 size are we aiming for */
- int listUnusedCDB; /* All unused StrategyCDB */
- int listHead[STRAT_NUM_LISTS]; /* ARC lists B1, T1, T2 and B2 */
- int listTail[STRAT_NUM_LISTS];
- int listSize[STRAT_NUM_LISTS];
- Buffer listFreeBuffers; /* List of unused buffers */
-
- long num_lookup; /* Some hit statistics */
- long num_hit[STRAT_NUM_LISTS];
- time_t stat_report;
+ int target_T1_size; /* What T1 size are we aiming for */
+ int listUnusedCDB; /* All unused StrategyCDB */
+ int listHead[STRAT_NUM_LISTS]; /* ARC lists B1, T1, T2
+ * and B2 */
+ int listTail[STRAT_NUM_LISTS];
+ int listSize[STRAT_NUM_LISTS];
+ Buffer listFreeBuffers; /* List of unused buffers */
+
+ long num_lookup; /* Some hit statistics */
+ long num_hit[STRAT_NUM_LISTS];
+ time_t stat_report;
/* Array of CDB's starts here */
- BufferStrategyCDB cdb[1]; /* VARIABLE SIZE ARRAY */
+ BufferStrategyCDB cdb[1]; /* VARIABLE SIZE ARRAY */
} BufferStrategyControl;
-
+
/* counters in buf_init.c */
extern long int ReadBufferCount;
extern long int ReadLocalBufferCount;
/* freelist.c */
extern BufferDesc *StrategyBufferLookup(BufferTag *tagPtr, bool recheck,
- int *cdb_found_index);
+ int *cdb_found_index);
extern BufferDesc *StrategyGetBuffer(int *cdb_replace_index);
extern void StrategyReplaceBuffer(BufferDesc *buf, BufferTag *newTag,
- int cdb_found_index, int cdb_replace_index);
+ int cdb_found_index, int cdb_replace_index);
extern void StrategyInvalidateBuffer(BufferDesc *buf);
extern void StrategyHintVacuum(bool vacuum_active);
extern int StrategyDirtyBufferList(BufferDesc **buffers, BufferTag *buftags,
- int max_buffers);
+ int max_buffers);
extern void StrategyInitialize(bool init);
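
For reference, a sketch (not part of the patch) of the lookup-or-replace sequence a caller such as the buffer manager follows with the freelist.c entry points declared above; locking, I/O and error handling are deliberately omitted, so this is only indicative.

#include "storage/buf_internals.h"

/* Sketch: find a page in the ARC-managed pool, or pick a victim buffer
 * and record the replacement with the strategy module. */
static BufferDesc *
lookup_or_replace_sketch(BufferTag *tag)
{
	int			cdb_found_index;
	int			cdb_replace_index;
	BufferDesc *buf;

	buf = StrategyBufferLookup(tag, false, &cdb_found_index);
	if (buf != NULL)
		return buf;				/* cache hit */

	/* cache miss: choose a buffer to evict, then register the new tag */
	buf = StrategyGetBuffer(&cdb_replace_index);
	StrategyReplaceBuffer(buf, tag, cdb_found_index, cdb_replace_index);
	return buf;
}
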
/* buf_table.c */
extern void InitBufTable(int size);
-extern int BufTableLookup(BufferTag *tagPtr);
+extern int BufTableLookup(BufferTag *tagPtr);
extern void BufTableInsert(BufferTag *tagPtr, int cdb_id);
extern void BufTableDelete(BufferTag *tagPtr);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.85 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/bufmgr.h,v 1.86 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void FlushRelationBuffers(Relation rel, BlockNumber firstDelBlock);
extern void DropRelationBuffers(Relation rel);
extern void DropRelFileNodeBuffers(RelFileNode rnode, bool istemp,
- BlockNumber firstDelBlock);
+ BlockNumber firstDelBlock);
extern void DropBuffers(Oid dbid);
#ifdef NOT_USED
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.47 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/fd.h,v 1.48 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void closeAllVfds(void);
extern void AtEOXact_Files(void);
extern void AtEOSubXact_Files(bool isCommit, TransactionId myXid,
- TransactionId parentXid);
+ TransactionId parentXid);
extern void RemovePgTempFiles(void);
extern int pg_fsync(int fd);
extern int pg_fdatasync(int fd);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/ipc.h,v 1.67 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/ipc.h,v 1.68 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* ipci.c */
extern void CreateSharedMemoryAndSemaphores(bool makePrivate,
- int maxBackends,
- int port);
+ int maxBackends,
+ int port);
#ifdef EXEC_BACKEND
/* postmaster.c */
-extern size_t ShmemBackendArraySize(void);
-extern void ShmemBackendArrayAllocation(void);
+extern size_t ShmemBackendArraySize(void);
+extern void ShmemBackendArrayAllocation(void);
#endif
#endif /* IPC_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.82 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/lock.h,v 1.83 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef int LOCKMASK;
typedef int LOCKMODE;
+
/* MAX_LOCKMODES cannot be larger than the # of bits in LOCKMASK */
#define MAX_LOCKMODES 10
* Lock methods are identified by LOCKMETHODID.
*/
typedef uint16 LOCKMETHODID;
+
/* MAX_LOCK_METHODS is the number of distinct lock control tables allowed */
#define MAX_LOCK_METHODS 3
#define LockMethodIsValid(lockmethodid) ((lockmethodid) != INVALID_LOCKMETHOD)
-extern int NumLockMethods;
+extern int NumLockMethods;
/*
* This is the control structure for a lock table. It lives in shared
- * memory. Currently, none of these fields change after startup. In addition
+ * memory. Currently, none of these fields change after startup. In addition
* to the LockMethodData, a lock table has a shared "lockHash" table holding
* per-locked-object lock information, and a shared "proclockHash" table
* holding per-lock-holder/waiter lock information.
*/
typedef struct LockMethodData
{
- LWLockId masterLock;
- int numLockModes;
- LOCKMASK conflictTab[MAX_LOCKMODES];
+ LWLockId masterLock;
+ int numLockModes;
+ LOCKMASK conflictTab[MAX_LOCKMODES];
} LockMethodData;
typedef LockMethodData *LockMethod;
/*
* offnum should be part of objId union above, but doing that would
- * increase sizeof(LOCKTAG) due to padding. Currently used by userlocks
- * only.
+ * increase sizeof(LOCKTAG) due to padding. Currently used by
+ * userlocks only.
*/
OffsetNumber offnum;
- LOCKMETHODID lockmethodid; /* needed by userlocks */
+ LOCKMETHODID lockmethodid; /* needed by userlocks */
} LOCKTAG;
*
* Currently, session proclocks are used for user locks and for cross-xact
* locks obtained for VACUUM. Note that a single backend can hold locks
- * under several different XIDs at once (including session locks). We treat
+ * under several different XIDs at once (including session locks). We treat
* such locks as never conflicting (a backend can never block itself).
*
* The holdMask field shows the already-granted locks represented by this
/*
* Each backend also maintains a local hash table with information about each
- * lock it is currently interested in. In particular the local table counts
+ * lock it is currently interested in. In particular the local table counts
* the number of times that lock has been acquired. This allows multiple
* requests for the same lock to be executed without additional accesses to
* shared memory. We also track the number of lock acquisitions per
typedef struct LOCALLOCKOWNER
{
/*
- * Note: owner can be NULL to indicate a non-transactional lock.
- * Must use a forward struct reference to avoid circularity.
+ * Note: owner can be NULL to indicate a non-transactional lock. Must
+ * use a forward struct reference to avoid circularity.
*/
struct ResourceOwnerData *owner;
int nLocks; /* # of times held by this owner */
int nLocks; /* total number of times lock is held */
int numLockOwners; /* # of relevant ResourceOwners */
int maxLockOwners; /* allocated size of array */
- LOCALLOCKOWNER *lockOwners; /* dynamically resizable array */
+ LOCALLOCKOWNER *lockOwners; /* dynamically resizable array */
} LOCALLOCK;
#define LOCALLOCK_LOCKMETHOD(llock) ((llock).tag.lock.lockmethodid)
extern void InitLocks(void);
extern LockMethod GetLocksMethodTable(LOCK *lock);
extern LOCKMETHODID LockMethodTableInit(const char *tabName,
- const LOCKMASK *conflictsP,
- int numModes, int maxBackends);
+ const LOCKMASK *conflictsP,
+ int numModes, int maxBackends);
extern LOCKMETHODID LockMethodTableRename(LOCKMETHODID lockmethodid);
extern bool LockAcquire(LOCKMETHODID lockmethodid, LOCKTAG *locktag,
TransactionId xid, LOCKMODE lockmode, bool dontWait);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.74 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/proc.h,v 1.75 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Each backend advertises up to PGPROC_MAX_CACHED_SUBXIDS TransactionIds
- * for non-aborted subtransactions of its current top transaction. These
+ * for non-aborted subtransactions of its current top transaction. These
* have to be treated as running XIDs by other backends.
*
* We also keep track of whether the cache overflowed (ie, the transaction has
* listed anywhere in the PGPROC array is not a running transaction. Else we
* have to look at pg_subtrans.
*/
-#define PGPROC_MAX_CACHED_SUBXIDS 64 /* XXX guessed-at value */
+#define PGPROC_MAX_CACHED_SUBXIDS 64 /* XXX guessed-at value */
-struct XidCache {
- bool overflowed;
- int nxids;
- TransactionId xids[PGPROC_MAX_CACHED_SUBXIDS];
+struct XidCache
+{
+ bool overflowed;
+ int nxids;
+ TransactionId xids[PGPROC_MAX_CACHED_SUBXIDS];
};
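
For reference, a sketch (not part of the patch) of how another backend might consult this cache, following the comment above: when overflowed is set the cache is inconclusive and pg_subtrans has to be checked instead.

#include "access/transam.h"
#include "storage/proc.h"

/* Sketch: test whether xid is a cached running subtransaction. */
static bool
subxid_cached_sketch(struct XidCache *cache, TransactionId xid)
{
	int			i;

	if (cache->overflowed)
		return false;			/* inconclusive: caller must look at pg_subtrans */

	for (i = 0; i < cache->nxids; i++)
	{
		if (TransactionIdEquals(cache->xids[i], xid))
			return true;
	}
	return false;
}
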
/*
SHM_QUEUE procLocks; /* list of PROCLOCK objects for locks held
* or awaited by this backend */
- struct XidCache subxids; /* cache for subtransaction XIDs */
+ struct XidCache subxids; /* cache for subtransaction XIDs */
};
/* NOTE: "typedef struct PGPROC PGPROC" appears in storage/lock.h. */
} PROC_HDR;
-#define DUMMY_PROC_DEFAULT 0
-#define DUMMY_PROC_BGWRITER 1
-#define NUM_DUMMY_PROCS 2
+#define DUMMY_PROC_DEFAULT 0
+#define DUMMY_PROC_BGWRITER 1
+#define NUM_DUMMY_PROCS 2
/* configurable options */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/relfilenode.h,v 1.10 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/relfilenode.h,v 1.11 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* spcNode identifies the tablespace of the relation. It corresponds to
* pg_tablespace.oid.
*
- * dbNode identifies the database of the relation. It is zero for
+ * dbNode identifies the database of the relation. It is zero for
* "shared" relations (those common to all databases of a cluster).
* Nonzero dbNode values correspond to pg_database.oid.
*
(node1).dbNode == (node2).dbNode && \
(node1).spcNode == (node2).spcNode)
-#endif /* RELFILENODE_H */
+#endif /* RELFILENODE_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.37 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/sinval.h,v 1.38 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int16 id; /* type field --- must be first */
Oid dbId; /* database ID, or 0 if a shared relation */
Oid relId; /* relation ID */
- RelFileNode physId; /* physical file ID */
+ RelFileNode physId; /* physical file ID */
+
/*
* Note: it is likely that RelFileNode will someday be changed to
* include database ID. In that case the dbId field will be redundant
extern TransactionId GetOldestXmin(bool allDbs);
extern int CountActiveBackends(void);
extern int CountEmptyBackendSlots(void);
+
/* Use "struct PGPROC", not PGPROC, to avoid including proc.h here */
extern struct PGPROC *BackendIdGetProc(BackendId procId);
extern void XidCacheRemoveRunningXids(TransactionId xid,
- int nxids, TransactionId *xids);
+ int nxids, TransactionId *xids);
/* signal handler for catchup events (SIGUSR1) */
extern void CatchupInterruptHandler(SIGNAL_ARGS);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.47 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/storage/smgr.h,v 1.48 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
typedef struct SMgrRelationData
{
/* rnode is the hashtable lookup key, so it must be first! */
- RelFileNode smgr_rnode; /* relation physical identifier */
+ RelFileNode smgr_rnode; /* relation physical identifier */
/* additional public fields may someday exist here */
/*
* Fields below here are intended to be private to smgr.c and its
- * submodules. Do not touch them from elsewhere.
+ * submodules. Do not touch them from elsewhere.
*/
int smgr_which; /* storage manager selector */
extern void smgrscheduleunlink(SMgrRelation reln, bool isTemp);
extern void smgrdounlink(SMgrRelation reln, bool isTemp, bool isRedo);
extern void smgrextend(SMgrRelation reln, BlockNumber blocknum, char *buffer,
- bool isTemp);
+ bool isTemp);
extern void smgrread(SMgrRelation reln, BlockNumber blocknum, char *buffer);
extern void smgrwrite(SMgrRelation reln, BlockNumber blocknum, char *buffer,
- bool isTemp);
+ bool isTemp);
extern BlockNumber smgrnblocks(SMgrRelation reln);
extern BlockNumber smgrtruncate(SMgrRelation reln, BlockNumber nblocks,
- bool isTemp);
+ bool isTemp);
extern void smgrimmedsync(SMgrRelation reln);
extern void smgrDoPendingDeletes(bool isCommit);
extern int smgrGetPendingDeletes(bool forCommit, RelFileNode **ptr);
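
For reference, a sketch (not part of the patch) of reading and rewriting one block through the smgr API declared above; smgropen() and BLCKSZ are assumed from elsewhere in the tree, and buffer-manager interlocking is ignored.

#include "storage/smgr.h"

/* Sketch: fetch one block, modify it, and write it back (non-temp relation). */
static void
rewrite_block_sketch(RelFileNode rnode, BlockNumber blocknum)
{
	char		buffer[BLCKSZ];
	SMgrRelation reln = smgropen(rnode);

	smgrread(reln, blocknum, buffer);
	/* ... modify buffer contents here ... */
	smgrwrite(reln, blocknum, buffer, false);
}
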
extern bool mdcreate(SMgrRelation reln, bool isRedo);
extern bool mdunlink(RelFileNode rnode, bool isRedo);
extern bool mdextend(SMgrRelation reln, BlockNumber blocknum, char *buffer,
- bool isTemp);
+ bool isTemp);
extern bool mdread(SMgrRelation reln, BlockNumber blocknum, char *buffer);
extern bool mdwrite(SMgrRelation reln, BlockNumber blocknum, char *buffer,
- bool isTemp);
+ bool isTemp);
extern BlockNumber mdnblocks(SMgrRelation reln);
extern BlockNumber mdtruncate(SMgrRelation reln, BlockNumber nblocks,
- bool isTemp);
+ bool isTemp);
extern bool mdimmedsync(SMgrRelation reln);
extern bool mdsync(void);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.70 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/tcopprot.h,v 1.71 2004/08/29 05:06:58 momjian Exp $
*
* OLD COMMENTS
* This file was created so that other c files could get the two
extern List *pg_rewrite_queries(List *querytree_list);
extern Plan *pg_plan_query(Query *querytree, ParamListInfo boundParams);
extern List *pg_plan_queries(List *querytrees, ParamListInfo boundParams,
- bool needSnapshot);
+ bool needSnapshot);
extern bool assign_max_stack_depth(int newval, bool doit, GucSource source);
-
#endif /* BOOTSTRAP_INCLUDE */
extern void die(SIGNAL_ARGS);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.23 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/tcop/utility.h,v 1.24 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void ProcessUtility(Node *parsetree, ParamListInfo params,
- DestReceiver *dest, char *completionTag);
+ DestReceiver *dest, char *completionTag);
extern bool UtilityReturnsTuples(Node *parsetree);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.74 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/acl.h,v 1.75 2004/08/29 05:06:58 momjian Exp $
*
* NOTES
* An ACL array is simply an array of AclItems, representing the union
#define ACLITEM_GET_IDTYPE(item) ((item).ai_privs >> 30)
#define ACL_GRANT_OPTION_FOR(privs) (((AclMode) (privs) & 0x7FFF) << 15)
-#define ACL_OPTION_TO_PRIVS(privs) (((AclMode) (privs) >> 15) & 0x7FFF)
+#define ACL_OPTION_TO_PRIVS(privs) (((AclMode) (privs) >> 15) & 0x7FFF)
#define ACLITEM_SET_PRIVS(item,privs) \
((item).ai_privs = ((item).ai_privs & ~((AclMode) 0x7FFF)) | \
*/
extern Acl *acldefault(GrantObjectType objtype, AclId ownerid);
extern Acl *aclupdate(const Acl *old_acl, const AclItem *mod_aip,
- int modechg, AclId ownerid, DropBehavior behavior);
+ int modechg, AclId ownerid, DropBehavior behavior);
extern Acl *aclnewowner(const Acl *old_acl, AclId oldownerid, AclId newownerid);
-
+
extern AclMode aclmask(const Acl *acl, AclId userid, AclId ownerid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
/*
* SQL functions (from acl.c)
extern char *get_groname(AclId grosysid);
extern AclMode pg_class_aclmask(Oid table_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclMode pg_database_aclmask(Oid db_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclMode pg_proc_aclmask(Oid proc_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclMode pg_language_aclmask(Oid lang_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclMode pg_namespace_aclmask(Oid nsp_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclMode pg_tablespace_aclmask(Oid spc_oid, AclId userid,
- AclMode mask, AclMaskHow how);
+ AclMode mask, AclMaskHow how);
extern AclResult pg_class_aclcheck(Oid table_oid, AclId userid, AclMode mode);
extern AclResult pg_database_aclcheck(Oid db_oid, AclId userid, AclMode mode);
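
For reference, a sketch (not part of the patch) of the usual caller pattern for these checks; GetUserId(), ACL_SELECT, ACL_KIND_CLASS and aclcheck_error() are assumed from the surrounding headers rather than shown in this hunk.

#include "miscadmin.h"
#include "utils/acl.h"

/* Sketch: refuse to proceed unless the current user may SELECT from the table. */
static void
require_select_sketch(Oid table_oid, const char *relname)
{
	AclResult	aclresult;

	aclresult = pg_class_aclcheck(table_oid, GetUserId(), ACL_SELECT);
	if (aclresult != ACLCHECK_OK)
		aclcheck_error(aclresult, ACL_KIND_CLASS, relname); /* ereports; no return */
}
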
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.248 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/builtins.h,v 1.249 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern Datum bttext_pattern_cmp(PG_FUNCTION_ARGS);
/* float.c */
-extern DLLIMPORT int extra_float_digits;
+extern DLLIMPORT int extra_float_digits;
extern double get_float8_infinity(void);
extern float get_float4_infinity(void);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.50 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/datetime.h,v 1.51 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
- * return zero or a positive value on success. On failure, they return
+ * return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
#define DTERR_BAD_FORMAT (-1)
#define DTERR_FIELD_OVERFLOW (-2)
-#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
-#define DTERR_INTERVAL_OVERFLOW (-4)
+#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
+#define DTERR_INTERVAL_OVERFLOW (-4)
#define DTERR_TZDISP_OVERFLOW (-5)
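
For reference, a sketch (not part of the patch) of the caller pattern the comment above describes, as used by datatype input functions; the DecodeDateTime() argument list is assumed from the declarations nearby and may differ in detail.

#include "utils/datetime.h"

/* Sketch: convert a negative DTERR_* result into a user-facing error. */
static void
decode_or_error_sketch(char **field, int *ftype, int nf, const char *str)
{
	int			dtype;
	int			tz;
	fsec_t		fsec;
	struct pg_tm tm;
	int			dterr;

	dterr = DecodeDateTime(field, ftype, nf, &dtype, &tm, &fsec, &tz);	/* assumed signature */
	if (dterr != 0)
		DateTimeParseError(dterr, str, "timestamp");	/* ereports ERROR */
}
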
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec);
extern void DateTimeParseError(int dterr, const char *str,
- const char *datatype);
+ const char *datatype);
extern int DetermineLocalTimeZone(struct pg_tm * tm);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.74 2004/08/29 04:13:10 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/elog.h,v 1.75 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define NOTICE 18 /* Helpful messages to users about query
* operation; sent to client and server
* log by default. */
-#define WARNING 19 /* Warnings. NOTICE is for expected messages
- * like implicit sequence creation by SERIAL.
- * WARNING is for unexpected messages.
- */
+#define WARNING 19 /* Warnings. NOTICE is for expected
+ * messages like implicit sequence
+ * creation by SERIAL. WARNING is for
+ * unexpected messages. */
#define ERROR 20 /* user error - abort transaction; return
* to known state */
/* Save ERROR value in PGERROR so it can be restored when Win32 includes
{ \
PG_exception_stack = &local_sigjmp_buf
-#define PG_CATCH() \
+#define PG_CATCH() \
} \
else \
{ \
extern PGErrorVerbosity Log_error_verbosity;
extern char *Log_line_prefix;
-extern int Log_destination;
+extern int Log_destination;
/* Log destination bitmap */
-#define LOG_DESTINATION_STDERR 1
-#define LOG_DESTINATION_SYSLOG 2
+#define LOG_DESTINATION_STDERR 1
+#define LOG_DESTINATION_SYSLOG 2
#define LOG_DESTINATION_EVENTLOG 4
/* Other exported functions */
* not available). Used before ereport/elog can be used
* safely (memory context, GUC load etc)
*/
-extern void write_stderr(const char *fmt,...)
+extern void
+write_stderr(const char *fmt,...)
/* This extension allows gcc to check the format string for consistency with
the supplied arguments. */
__attribute__((format(printf, 1, 2)));
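
For reference, a sketch (not part of the patch) of the PG_TRY/PG_CATCH pattern to which the reformatted PG_CATCH macro above belongs.

#include "postgres.h"

/* Sketch: run code that may ereport(ERROR), clean up, then re-throw. */
static void
guarded_call_sketch(void)
{
	PG_TRY();
	{
		/* work that might ereport(ERROR, ...) */
	}
	PG_CATCH();
	{
		/* undo module-local state here, then propagate the error */
		PG_RE_THROW();
	}
	PG_END_TRY();
}
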
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/utils/errcodes.h,v 1.14 2004/07/31 07:39:20 tgl Exp $
+ * $PostgreSQL: pgsql/src/include/utils/errcodes.h,v 1.15 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#define ERRCODE_INDICATOR_OVERFLOW MAKE_SQLSTATE('2','2', '0','2','2')
#define ERRCODE_INTERVAL_FIELD_OVERFLOW MAKE_SQLSTATE('2','2', '0','1','5')
#define ERRCODE_INVALID_ARGUMENT_FOR_LOG MAKE_SQLSTATE('2','2', '0','1','E')
-#define ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION MAKE_SQLSTATE('2','2', '0', '1', 'F')
+#define ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION MAKE_SQLSTATE('2','2', '0', '1', 'F')
#define ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION MAKE_SQLSTATE('2','2', '0', '1', 'G')
#define ERRCODE_INVALID_CHARACTER_VALUE_FOR_CAST MAKE_SQLSTATE('2','2', '0','1','8')
#define ERRCODE_INVALID_DATETIME_FORMAT MAKE_SQLSTATE('2','2', '0','0','7')
* Copyright (c) 2000-2004, PostgreSQL Global Development Group
* Written by Peter Eisentraut <peter_e@gmx.net>.
*
- * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.50 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/guc.h,v 1.51 2004/08/29 05:06:58 momjian Exp $
*--------------------------------------------------------------------
*/
#ifndef GUC_H
*
* PGC_S_TEST is used when testing values to be stored as per-database or
* per-user defaults ("doit" will always be false, so this never gets stored
- * as the actual source of any value). This is an interactive case, but
+ * as the actual source of any value). This is an interactive case, but
* it needs its own source value because some assign hooks need to make
* different validity checks in this case.
*/
PGC_S_SESSION /* SET command */
} GucSource;
-typedef const char* (*GucStringAssignHook)(const char *newval, bool doit, GucSource source);
-typedef bool (*GucBoolAssignHook)(bool newval, bool doit, GucSource source);
-typedef bool (*GucIntAssignHook)(int newval, bool doit, GucSource source);
-typedef bool (*GucRealAssignHook)(double newval, bool doit, GucSource source);
+typedef const char *(*GucStringAssignHook) (const char *newval, bool doit, GucSource source);
+typedef bool (*GucBoolAssignHook) (bool newval, bool doit, GucSource source);
+typedef bool (*GucIntAssignHook) (int newval, bool doit, GucSource source);
+typedef bool (*GucRealAssignHook) (double newval, bool doit, GucSource source);
-typedef const char* (*GucShowHook)(void);
+typedef const char *(*GucShowHook) (void);
#define GUC_QUALIFIER_SEPARATOR '.'
GucContext context, GucSource source);
extern void DefineCustomBoolVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- bool* valueAddr,
- GucContext context,
- GucBoolAssignHook assign_hook,
- GucShowHook show_hook);
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ bool *valueAddr,
+ GucContext context,
+ GucBoolAssignHook assign_hook,
+ GucShowHook show_hook);
extern void DefineCustomIntVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- int* valueAddr,
- GucContext context,
- GucIntAssignHook assign_hook,
- GucShowHook show_hook);
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ int *valueAddr,
+ GucContext context,
+ GucIntAssignHook assign_hook,
+ GucShowHook show_hook);
extern void DefineCustomRealVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- double* valueAddr,
- GucContext context,
- GucRealAssignHook assign_hook,
- GucShowHook show_hook);
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ double *valueAddr,
+ GucContext context,
+ GucRealAssignHook assign_hook,
+ GucShowHook show_hook);
extern void DefineCustomStringVariable(
- const char* name,
- const char* short_desc,
- const char* long_desc,
- char** valueAddr,
- GucContext context,
- GucStringAssignHook assign_hook,
- GucShowHook show_hook);
+ const char *name,
+ const char *short_desc,
+ const char *long_desc,
+ char **valueAddr,
+ GucContext context,
+ GucStringAssignHook assign_hook,
+ GucShowHook show_hook);
-extern void EmitWarningsOnPlaceholders(const char* className);
+extern void EmitWarningsOnPlaceholders(const char *className);
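
For reference, a sketch (not part of the patch) of how a loadable module might use the declarations above; the module name "my_module", the variable "my_module.enable_foo", and the initialization function are hypothetical names invented for the example.

#include "utils/guc.h"

static bool my_enable_foo = false;	/* hypothetical setting */

/* Sketch: register a custom boolean GUC and warn about stray placeholders. */
void
my_module_init(void)				/* hypothetical module init hook */
{
	DefineCustomBoolVariable("my_module.enable_foo",
							 "Enables the foo behaviour of my_module.",
							 NULL,		/* no long description */
							 &my_enable_foo,
							 PGC_USERSET,
							 NULL,		/* no assign hook */
							 NULL);		/* no show hook */

	EmitWarningsOnPlaceholders("my_module");
}
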
extern const char *GetConfigOption(const char *name);
extern const char *GetConfigOptionResetString(const char *name);
/* in utils/adt/datetime.c */
extern bool ClearDateCache(bool newval, bool doit, GucSource source);
+
/* in utils/adt/regexp.c */
extern const char *assign_regex_flavor(const char *value,
bool doit, GucSource source);
+
/* in catalog/namespace.c */
extern const char *assign_search_path(const char *newval,
bool doit, GucSource source);
+
/* in access/transam/xlog.c */
extern const char *assign_xlog_sync_method(const char *method,
bool doit, GucSource source);
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/include/utils/guc_tables.h,v 1.15 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/guc_tables.h,v 1.16 2004/08/29 05:06:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int status; /* previous status bits, see below */
GucSource tentative_source; /* source of the tentative_value */
GucSource source; /* source of the actual value */
- union config_var_value tentative_val; /* previous tentative val */
- union config_var_value value; /* previous actual value */
+ union config_var_value tentative_val; /* previous tentative val */
+ union config_var_value value; /* previous actual value */
} GucStack;
/*
#define GUC_REPORT 0x0010 /* auto-report changes to client */
#define GUC_NOT_IN_SAMPLE 0x0020 /* not in postgresql.conf.sample */
#define GUC_DISALLOW_IN_FILE 0x0040 /* can't set in postgresql.conf */
-#define GUC_CUSTOM_PLACEHOLDER 0x0080 /* placeholder for a custom variable */
+#define GUC_CUSTOM_PLACEHOLDER 0x0080 /* placeholder for a custom
+ * variable */
/* bit values in status field */
#define GUC_HAVE_TENTATIVE 0x0001 /* tentative value is defined */
extern void build_guc_variables(void);
-#endif /* GUC_TABLES_H */
+#endif /* GUC_TABLES_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.31 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/hsearch.h,v 1.32 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
*/
typedef uint32 (*HashValueFunc) (const void *key, Size keysize);
typedef int (*HashCompareFunc) (const void *key1, const void *key2,
- Size keysize);
+ Size keysize);
/*
* Space allocation function for a hashtable --- designed to match malloc().
typedef struct HASHELEMENT
{
struct HASHELEMENT *link; /* link to next entry in same bucket */
- uint32 hashvalue; /* hash function result for this entry */
+ uint32 hashvalue; /* hash function result for this entry */
} HASHELEMENT;
/* A hash bucket is a linked list of HASHELEMENTs */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.89 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/lsyscache.h,v 1.90 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern bool op_in_opclass(Oid opno, Oid opclass);
extern void get_op_opclass_properties(Oid opno, Oid opclass,
- int *strategy, Oid *subtype,
- bool *recheck);
+ int *strategy, Oid *subtype,
+ bool *recheck);
extern Oid get_opclass_member(Oid opclass, Oid subtype, int16 strategy);
extern Oid get_op_hash_function(Oid opno);
extern Oid get_opclass_proc(Oid opclass, Oid subtype, int16 procnum);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.51 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/portal.h,v 1.52 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
const char *name; /* portal's name */
MemoryContext heap; /* subsidiary memory for portal */
ResourceOwner resowner; /* resources owned by portal */
- void (*cleanup) (Portal portal); /* cleanup hook */
+ void (*cleanup) (Portal portal); /* cleanup hook */
TransactionId createXact; /* the xid of the creating xact */
/* The query or queries the portal will execute */
extern void AtAbort_Portals(void);
extern void AtCleanup_Portals(void);
extern void AtSubCommit_Portals(TransactionId parentXid,
- ResourceOwner parentXactOwner);
+ ResourceOwner parentXactOwner);
extern void AtSubAbort_Portals(TransactionId parentXid,
- ResourceOwner parentXactOwner);
+ ResourceOwner parentXactOwner);
extern void AtSubCleanup_Portals(void);
extern Portal CreatePortal(const char *name, bool allowDup, bool dupSilent);
extern Portal CreateNewPortal(void);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.78 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/rel.h,v 1.79 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
RelFileNode rd_node; /* relation physical identifier */
/* use "struct" here to avoid needing to include smgr.h: */
- struct SMgrRelationData *rd_smgr; /* cached file handle, or NULL */
+ struct SMgrRelationData *rd_smgr; /* cached file handle, or NULL */
BlockNumber rd_targblock; /* current insertion target block, or
* InvalidBlockNumber */
int rd_refcnt; /* reference count */
bool rd_istemp; /* rel uses the local buffer mgr */
bool rd_isnailed; /* rel is nailed in cache */
bool rd_isvalid; /* relcache entry is valid */
- char rd_indexvalid; /* state of rd_indexlist: 0 = not valid,
- * 1 = valid, 2 = temporarily forced */
- TransactionId rd_createxact; /* rel was created in current xact */
+ char rd_indexvalid; /* state of rd_indexlist: 0 = not valid, 1
+ * = valid, 2 = temporarily forced */
+ TransactionId rd_createxact; /* rel was created in current xact */
+
/*
* rd_createxact is the XID of the highest subtransaction the rel has
* survived into; or zero if the rel was not created in the current
- * transaction. This should be relied on only for optimization purposes;
- * it is possible for new-ness to be "forgotten" (eg, after CLUSTER).
+ * transaction. This should be relied on only for optimization
+ * purposes; it is possible for new-ness to be "forgotten" (eg, after
+ * CLUSTER).
*/
Form_pg_class rd_rel; /* RELATION tuple */
TupleDesc rd_att; /* tuple descriptor */
* index access support info (used only for an index relation)
*
* Note: only default operators and support procs for each opclass are
- * cached, namely those with subtype zero. The arrays are indexed by
+ * cached, namely those with subtype zero. The arrays are indexed by
* strategy or support number, which is a sufficient identifier given
* that restriction.
*/
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.44 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/relcache.h,v 1.45 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern void AtEOXact_RelationCache(bool isCommit);
extern void AtEOSubXact_RelationCache(bool isCommit, TransactionId myXid,
- TransactionId parentXid);
+ TransactionId parentXid);
/*
* Routines to help manage rebuilding of relcache init file
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.3 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/resowner.h,v 1.4 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Resource releasing is done in three phases: pre-locks, locks, and
- * post-locks. The pre-lock phase must release any resources that are
+ * post-locks. The pre-lock phase must release any resources that are
* visible to other backends (such as pinned buffers); this ensures that
* when we release a lock that another backend may be waiting on, it will
* see us as being fully out of our transaction. The post-lock phase
* by providing a callback of this form.
*/
typedef void (*ResourceReleaseCallback) (ResourceReleasePhase phase,
- bool isCommit,
- bool isTopLevel,
- void *arg);
+ bool isCommit,
+ bool isTopLevel,
+ void *arg);
/*
/* generic routines */
extern ResourceOwner ResourceOwnerCreate(ResourceOwner parent,
- const char *name);
+ const char *name);
extern void ResourceOwnerRelease(ResourceOwner owner,
- ResourceReleasePhase phase,
- bool isCommit,
- bool isTopLevel);
+ ResourceReleasePhase phase,
+ bool isCommit,
+ bool isTopLevel);
extern void ResourceOwnerDelete(ResourceOwner owner);
extern ResourceOwner ResourceOwnerGetParent(ResourceOwner owner);
extern void ResourceOwnerNewParent(ResourceOwner owner,
- ResourceOwner newparent);
+ ResourceOwner newparent);
extern void RegisterResourceReleaseCallback(ResourceReleaseCallback callback,
- void *arg);
+ void *arg);
extern void UnregisterResourceReleaseCallback(ResourceReleaseCallback callback,
- void *arg);
+ void *arg);
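
For reference, a sketch (not part of the patch) of registering a release callback against the API above; the phase constant RESOURCE_RELEASE_AFTER_LOCKS is assumed from the three phases described in the comment.

#include "utils/resowner.h"

/* Sketch: a module-private cleanup routine run in the post-locks phase. */
static void
my_release_callback_sketch(ResourceReleasePhase phase, bool isCommit,
						   bool isTopLevel, void *arg)
{
	if (phase != RESOURCE_RELEASE_AFTER_LOCKS)
		return;					/* only act once locks have been released */

	/* free module-private resources tracked via *arg here */
}

/* Typically registered once, e.g. at module load time:
 *     RegisterResourceReleaseCallback(my_release_callback_sketch, NULL);
 */
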
/* support for buffer refcount management */
extern void ResourceOwnerEnlargeBuffers(ResourceOwner owner);
/* support for catcache refcount management */
extern void ResourceOwnerEnlargeCatCacheRefs(ResourceOwner owner);
extern void ResourceOwnerRememberCatCacheRef(ResourceOwner owner,
- HeapTuple tuple);
+ HeapTuple tuple);
extern void ResourceOwnerForgetCatCacheRef(ResourceOwner owner,
- HeapTuple tuple);
+ HeapTuple tuple);
extern void ResourceOwnerEnlargeCatCacheListRefs(ResourceOwner owner);
extern void ResourceOwnerRememberCatCacheListRef(ResourceOwner owner,
- CatCList *list);
+ CatCList *list);
extern void ResourceOwnerForgetCatCacheListRef(ResourceOwner owner,
- CatCList *list);
+ CatCList *list);
/* support for relcache refcount management */
extern void ResourceOwnerEnlargeRelationRefs(ResourceOwner owner);
extern void ResourceOwnerRememberRelationRef(ResourceOwner owner,
- Relation rel);
+ Relation rel);
extern void ResourceOwnerForgetRelationRef(ResourceOwner owner,
- Relation rel);
+ Relation rel);
#endif /* RESOWNER_H */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.18 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/selfuncs.h,v 1.19 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
double input_rows);
extern Selectivity estimate_hash_bucketsize(Query *root, Node *hashkey,
- int nbuckets);
+ int nbuckets);
extern Datum btcostestimate(PG_FUNCTION_ARGS);
extern Datum rtcostestimate(PG_FUNCTION_ARGS);
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.38 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/timestamp.h,v 1.39 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Internal routines (not fmgr-callable) */
extern int tm2timestamp(struct pg_tm * tm, fsec_t fsec, int *tzp, Timestamp *dt);
-extern int timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm,
+extern int timestamp2tm(Timestamp dt, int *tzp, struct pg_tm * tm,
fsec_t *fsec, char **tzn);
extern void dt2time(Timestamp dt, int *hour, int *min, int *sec, fsec_t *fsec);
extern void GetEpochTime(struct pg_tm * tm);
extern int timestamp_cmp_internal(Timestamp dt1, Timestamp dt2);
+
/* timestamp comparison works for timestamptz also */
#define timestamptz_cmp_internal(dt1,dt2) timestamp_cmp_internal(dt1, dt2)
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.5 2004/08/29 04:13:11 momjian Exp $
+ * $PostgreSQL: pgsql/src/include/utils/typcache.h,v 1.6 2004/08/29 05:06:59 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Pre-set-up fmgr call info for the equality operator and the btree
* comparison function. These are kept in the type cache to avoid
- * problems with memory leaks in repeated calls to array_eq and array_cmp.
- * There is not currently a need to maintain call info for the lt_opr
- * or gt_opr.
+ * problems with memory leaks in repeated calls to array_eq and
+ * array_cmp. There is not currently a need to maintain call info for
+ * the lt_opr or gt_opr.
*/
FmgrInfo eq_opr_finfo;
FmgrInfo cmp_proc_finfo;
/*
* Tuple descriptor if it's a composite type (row type). NULL if not
- * composite or information hasn't yet been requested. (NOTE: this
- * is actually just a link to information maintained by relcache.c.)
+ * composite or information hasn't yet been requested. (NOTE: this is
+ * actually just a link to information maintained by relcache.c.)
*/
TupleDesc tupDesc;
} TypeCacheEntry;
extern TupleDesc lookup_rowtype_tupdesc(Oid type_id, int32 typmod);
extern TupleDesc lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod,
- bool noError);
+ bool noError);
extern void assign_record_type_typmod(TupleDesc tupDesc);
#include
-char *ECPGalloc(long, int);
+char *ECPGalloc(long, int);
static int
deccall2(decimal * arg1, decimal * arg2, int (*ptr) (numeric *, numeric *))
*nres;
int i;
- /* we must NOT set the result to NULL here because it may be the same variable as one of the arguments */
+ /*
+ * we must NOT set the result to NULL here because it may be the same
+ * variable as one of the arguments
+ */
if (risnull(CDECIMALTYPE, (char *) arg1) || risnull(CDECIMALTYPE, (char *) arg2))
return 0;
if (i == 0) /* No error */
{
-
+
/* set the result to null in case it errors out later */
rsetnull(CDECIMALTYPE, (char *) result);
PGTYPESnumeric_to_decimal(nres, result);
}
-
+
PGTYPESnumeric_free(nres);
PGTYPESnumeric_free(a1);
PGTYPESnumeric_free(a2);
int
deccvasc(char *cp, int len, decimal * np)
{
- char *str = ecpg_strndup(cp, len); /* decimal_in always converts the
- * complete string */
+ char *str = ecpg_strndup(cp, len); /* decimal_in always
+ * converts the complete
+ * string */
int ret = 0;
numeric *result;
int
decdiv(decimal * n1, decimal * n2, decimal * result)
{
-
+
int i;
i = deccall3(n1, n2, result, PGTYPESnumeric_div);
decmul(decimal * n1, decimal * n2, decimal * result)
{
int i;
-
+
i = deccall3(n1, n2, result, PGTYPESnumeric_mul);
if (i != 0)
decsub(decimal * n1, decimal * n2, decimal * result)
{
int i;
-
+
i = deccall3(n1, n2, result, PGTYPESnumeric_sub);
if (i != 0)
rstrdate(char *str, date * d)
{
date dat;
- char strbuf[10];
- int i,j;
-
- rsetnull(CDATETYPE, (char *)&dat);
- /*
- * we have to flip the year month date around for postgres
- * expects yyyymmdd
- *
- */
-
- for (i=0,j=0; i < 10; i++ )
+ char strbuf[10];
+ int i,
+ j;
+
+ rsetnull(CDATETYPE, (char *) &dat);
+
+ /*
+ * we have to flip the year month date around for postgres expects
+ * yyyymmdd
+ *
+ */
+
+ for (i = 0, j = 0; i < 10; i++)
{
/* ignore non-digits */
- if ( isdigit((unsigned char) str[i]) )
+ if (isdigit((unsigned char) str[i]))
{
-
+
/* j only increments if it is a digit */
- switch(j)
+ switch (j)
{
- /* stick the month into the 4th, 5th position */
+ /* stick the month into the 4th, 5th position */
case 0:
case 1:
- strbuf[j+4] = str[i];
+ strbuf[j + 4] = str[i];
break;
- /* stick the day into the 6th, and 7th position */
+ /* stick the day into the 6th, and 7th position */
case 2:
case 3:
- strbuf[j+4] = str[i];
+ strbuf[j + 4] = str[i];
break;
- /* stick the year into the first 4 positions */
+ /* stick the year into the first 4 positions */
case 4:
case 5:
case 6:
case 7:
- strbuf[j-4] = str[i];
+ strbuf[j - 4] = str[i];
break;
-
+
}
j++;
- }
- }
+ }
+ }
strbuf[8] = '\0';
dat = PGTYPESdate_from_asc(strbuf, NULL);
/* And the datetime stuff */
void
-dtcurrent(timestamp *ts)
+dtcurrent(timestamp * ts)
{
PGTYPEStimestamp_current(ts);
}
int
-dtcvasc(char *str, timestamp *ts)
+dtcvasc(char *str, timestamp * ts)
{
timestamp ts_tmp;
int i;
}
int
-dtsub(timestamp *ts1, timestamp *ts2, interval *iv)
+dtsub(timestamp * ts1, timestamp * ts2, interval * iv)
{
return PGTYPEStimestamp_sub(ts1, ts2, iv);
}
int
-dttoasc(timestamp *ts, char *output)
+dttoasc(timestamp * ts, char *output)
{
char *asctime = PGTYPEStimestamp_to_asc(*ts);
}
int
-dttofmtasc(timestamp *ts, char *output, int str_len, char *fmtstr)
+dttofmtasc(timestamp * ts, char *output, int str_len, char *fmtstr)
{
return PGTYPEStimestamp_fmt_asc(ts, output, str_len, fmtstr);
}
int
-intoasc(interval *i, char *str)
+intoasc(interval * i, char *str)
{
str = PGTYPESinterval_to_asc(i);
* of the long value
*/
static void
-initValue (long lng_val)
-{
- int i, j;
- long l, dig;
-
- /* set some obvious things */
- value.val = lng_val >= 0 ? lng_val : lng_val * (-1);
- value.sign = lng_val >= 0 ? '+' : '-';
- value.maxdigits = log10 (2) * (8 * sizeof (long) - 1);
-
- /* determine the number of digits */
- i = 0;
- l = 1;
- do
- {
- i++;
- l *= 10;
- }
- while ((l - 1) < value.val && l <= LONG_MAX / 10);
-
- if (l <= LONG_MAX/10)
- {
- value.digits = i;
- l /= 10;
- }
- else
- value.digits = i + 1;
-
- value.remaining = value.digits;
-
- /* convert the long to string */
- value.val_string = (char *) malloc (value.digits + 1);
- dig = value.val;
- for (i = value.digits, j = 0; i > 0; i--, j++)
- {
- value.val_string[j] = dig/l + '0';
- dig = dig % l;
- l /= 10;
- }
- value.val_string[value.digits] = '\0';
+initValue(long lng_val)
+{
+ int i,
+ j;
+ long l,
+ dig;
+
+ /* set some obvious things */
+ value.val = lng_val >= 0 ? lng_val : lng_val * (-1);
+ value.sign = lng_val >= 0 ? '+' : '-';
+ value.maxdigits = log10(2) * (8 * sizeof(long) - 1);
+
+ /* determine the number of digits */
+ i = 0;
+ l = 1;
+ do
+ {
+ i++;
+ l *= 10;
+ }
+ while ((l - 1) < value.val && l <= LONG_MAX / 10);
+
+ if (l <= LONG_MAX / 10)
+ {
+ value.digits = i;
+ l /= 10;
+ }
+ else
+ value.digits = i + 1;
+
+ value.remaining = value.digits;
+
+ /* convert the long to string */
+ value.val_string = (char *) malloc(value.digits + 1);
+ dig = value.val;
+ for (i = value.digits, j = 0; i > 0; i--, j++)
+ {
+ value.val_string[j] = dig / l + '0';
+ dig = dig % l;
+ l /= 10;
+ }
+ value.val_string[value.digits] = '\0';
}
/* return the position oft the right-most dot in some string */
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.22 2004/06/10 22:26:21 momjian Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/connect.c,v 1.23 2004/08/29 05:06:59 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#ifdef ENABLE_THREAD_SAFETY
static pthread_mutex_t connections_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_key_t actual_connection_key;
+static pthread_key_t actual_connection_key;
static pthread_once_t actual_connection_key_once = PTHREAD_ONCE_INIT;
+
#else
static struct connection *actual_connection = NULL;
#endif
static void
ecpg_actual_connection_init(void)
{
- pthread_key_create(&actual_connection_key, NULL);
+ pthread_key_create(&actual_connection_key, NULL);
}
#endif
}
#ifdef ENABLE_THREAD_SAFETY
- if( pthread_getspecific(actual_connection_key) == act )
- pthread_setspecific(actual_connection_key, all_connections);
+ if (pthread_getspecific(actual_connection_key) == act)
+ pthread_setspecific(actual_connection_key, all_connections);
#else
if (actual_connection == act)
actual_connection = all_connections;
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.26 2004/07/04 15:02:22 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/data.c,v 1.27 2004/08/29 05:06:59 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
#include "pgtypes_timestamp.h"
#include "pgtypes_interval.h"
-static bool garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat)
+static bool
+garbage_left(enum ARRAY_TYPE isarray, char *scan_length, enum COMPAT_MODE compat)
{
- /* INFORMIX allows for selecting a numeric into an int, the result is truncated */
- if (isarray == ECPG_ARRAY_NONE && INFORMIX_MODE(compat) && *scan_length == '.')
+ /*
+ * INFORMIX allows for selecting a numeric into an int, the result is
+ * truncated
+ */
+ if (isarray == ECPG_ARRAY_NONE && INFORMIX_MODE(compat) && *scan_length == '.')
return false;
-
+
if (isarray == ECPG_ARRAY_ARRAY && *scan_length != ',' && *scan_length != '}')
return true;
char *pval = (char *) PQgetvalue(results, act_tuple, act_field);
int value_for_indicator = 0;
- ECPGlog("ECPGget_data line %d: RESULT: %s offset: %ld array: %s\n", lineno, pval ? pval : "", offset, isarray?"Yes":"No");
+ ECPGlog("ECPGget_data line %d: RESULT: %s offset: %ld array: %s\n", lineno, pval ? pval : "", offset, isarray ? "Yes" : "No");
/* pval is a pointer to the value */
/* let's check if it really is an array if it should be one */
{
*((unsigned long long int *) (var + offset * act_tuple)) = strtoull(pval, &scan_length, 10);
if ((isarray && *scan_length != ',' && *scan_length != '}')
- || (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' ')) /* Garbage left */
+ || (!isarray && !(INFORMIX_MODE(compat) && *scan_length == '.') && *scan_length != '\0' && *scan_length != ' ')) /* Garbage left */
{
ECPGraise(lineno, ECPG_UINT_FORMAT, ECPG_SQLSTATE_DATATYPE_MISMATCH, pval);
return (false);
if (INFORMIX_MODE(compat))
{
- /* Informix wants its own NULL value here instead of an error */
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
ECPGset_noind_null(ECPGt_numeric, nres);
}
else
{
if (INFORMIX_MODE(compat))
{
- /* Informix wants its own NULL value here instead of an error */
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
ECPGset_noind_null(ECPGt_interval, ires);
}
else
{
if (INFORMIX_MODE(compat))
{
- /* Informix wants its own NULL value here instead of an error */
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
ECPGset_noind_null(ECPGt_date, &ddres);
}
else
{
if (INFORMIX_MODE(compat))
{
- /* Informix wants its own NULL value here instead of an error */
+ /*
+ * Informix wants its own NULL value here
+ * instead of an error
+ */
ECPGset_noind_null(ECPGt_timestamp, &tres);
}
else
/* dynamic SQL support routines
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.11 2004/07/05 09:45:53 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/descriptor.c,v 1.12 2004/08/29 05:06:59 momjian Exp $
*/
#define POSTGRES_ECPG_INTERNAL
ECPGset_desc_header(int lineno, char *desc_name, int count)
{
struct descriptor *desc;
-
+
for (desc = all_descriptors; desc; desc = desc->next)
{
- if (strcmp(desc_name, desc->name)==0)
+ if (strcmp(desc_name, desc->name) == 0)
break;
}
for (desc = all_descriptors; desc; desc = desc->next)
{
- if (strcmp(desc_name, desc->name)==0)
+ if (strcmp(desc_name, desc->name) == 0)
break;
}
}
if (!(var = (struct variable *) ECPGalloc(sizeof(struct variable), lineno)))
- return false;
+ return false;
va_start(args, index);
{
enum ECPGdtype itemtype;
enum ECPGttype type;
- const char *tobeinserted = NULL;
+ const char *tobeinserted = NULL;
bool malloced;
itemtype = va_arg(args, enum ECPGdtype);
switch (itemtype)
{
case ECPGd_data:
- {
- if (!ECPGstore_input(lineno, true, var, &tobeinserted, &malloced))
{
- ECPGfree(var);
- return false;
+ if (!ECPGstore_input(lineno, true, var, &tobeinserted, &malloced))
+ {
+ ECPGfree(var);
+ return false;
+ }
+
+ desc_item->data = (char *) tobeinserted;
+ tobeinserted = NULL;
+ break;
}
-
- desc_item->data = (char *) tobeinserted;
- tobeinserted = NULL;
- break;
- }
case ECPGd_indicator:
set_int_item(lineno, &desc_item->indicator, var->pointer, var->type);
break;
default:
- {
- char type_str[20];
- snprintf(type_str, sizeof(type_str), "%d", itemtype);
- ECPGraise(lineno, ECPG_UNKNOWN_DESCRIPTOR_ITEM, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, type_str);
- ECPGfree(var);
- return false;
- }
+ {
+ char type_str[20];
+
+ snprintf(type_str, sizeof(type_str), "%d", itemtype);
+ ECPGraise(lineno, ECPG_UNKNOWN_DESCRIPTOR_ITEM, ECPG_SQLSTATE_ECPG_INTERNAL_ERROR, type_str);
+ ECPGfree(var);
+ return false;
+ }
}
- /*if (itemtype == ECPGd_data)
- {
- free(desc_item->data);
- desc_item->data = NULL;
- }*/
+ /*
+ * if (itemtype == ECPGd_data) { free(desc_item->data);
+ * desc_item->data = NULL; }
+ */
}
while (true);
ECPGfree(var);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.37 2004/07/05 09:45:53 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/execute.c,v 1.38 2004/08/29 05:06:59 momjian Exp $ */
/*
* The aim is to get a simpler inteface to the database routines.
}
void
-ECPGget_variable(va_list *ap, enum ECPGttype type, struct variable *var, bool indicator)
+ECPGget_variable(va_list *ap, enum ECPGttype type, struct variable * var, bool indicator)
{
var->type = type;
var->pointer = va_arg(*ap, char *);
var->varcharsize = va_arg(*ap, long);
var->arrsize = va_arg(*ap, long);
var->offset = va_arg(*ap, long);
-
+
if (var->arrsize == 0 || var->varcharsize == 0)
var->value = *((char **) (var->pointer));
else
var->value = var->pointer;
/*
- * negative values are used to indicate an array without given
- * bounds
+ * negative values are used to indicate an array without given bounds
*/
/* reset to zero for us */
if (var->arrsize < 0)
var->varcharsize = 0;
var->next = NULL;
-
+
if (indicator)
{
var->ind_type = va_arg(*ap, enum ECPGttype);
{
struct ECPGtype_information_cache *new_entry
= (struct ECPGtype_information_cache *) ECPGalloc(sizeof(struct ECPGtype_information_cache), lineno);
-
+
new_entry->oid = oid;
new_entry->isarray = isarray;
new_entry->next = *cache;
*cache = new_entry;
}
-
+
static enum ARRAY_TYPE
ECPGis_type_an_array(int type, const struct statement * stmt, const struct variable * var)
{
- char *array_query;
- enum ARRAY_TYPE isarray = ECPG_ARRAY_NOT_SET;
- PGresult *query;
+ char *array_query;
+ enum ARRAY_TYPE isarray = ECPG_ARRAY_NOT_SET;
+ PGresult *query;
struct ECPGtype_information_cache *cache_entry;
if ((stmt->connection->cache_head) == NULL)
if (cache_entry->oid == type)
return cache_entry->isarray;
}
-
+
array_query = (char *) ECPGalloc(strlen("select typlen from pg_type where oid= and typelem<>0") + 11, stmt->lineno);
sprintf(array_query, "select typlen from pg_type where oid=%d and typelem<>0", type);
query = PQexec(stmt->connection->connection, array_query);
ECPGfree(array_query);
- if (PQresultStatus(query) == PGRES_TUPLES_OK )
+ if (PQresultStatus(query) == PGRES_TUPLES_OK)
{
- if ( PQntuples(query) == 0 )
+ if (PQntuples(query) == 0)
isarray = ECPG_ARRAY_NONE;
else
{
}
PQclear(query);
ECPGtypeinfocache_push(&(stmt->connection->cache_head), type, isarray, stmt->lineno);
- ECPGlog("ECPGis_type_an_array line %d: TYPE database: %d C: %d array: %s\n", stmt->lineno, type, var->type, isarray?"Yes":"No");
+ ECPGlog("ECPGis_type_an_array line %d: TYPE database: %d C: %d array: %s\n", stmt->lineno, type, var->type, isarray ? "Yes" : "No");
return isarray;
}
ECPGstore_result(const PGresult *results, int act_field,
const struct statement * stmt, struct variable * var)
{
- enum ARRAY_TYPE isarray;
+ enum ARRAY_TYPE isarray;
int act_tuple,
ntuples = PQntuples(results);
bool status = true;
{
ECPGlog("ECPGstore_result line %d: Incorrect number of matches: %d don't fit into array of %d\n",
stmt->lineno, ntuples, var->arrsize);
- ECPGraise(stmt->lineno, INFORMIX_MODE(stmt->compat)?ECPG_INFORMIX_SUBSELECT_NOT_ONE:ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
+ ECPGraise(stmt->lineno, INFORMIX_MODE(stmt->compat) ? ECPG_INFORMIX_SUBSELECT_NOT_ONE : ECPG_TOO_MANY_MATCHES, ECPG_SQLSTATE_CARDINALITY_VIOLATION, NULL);
return false;
}
}
case ECPGt_unsigned_char:
{
/* set slen to string length if type is char * */
- int slen = (var->varcharsize == 0) ? strlen((char *) var->value) : var->varcharsize;
+ int slen = (var->varcharsize == 0) ? strlen((char *) var->value) : var->varcharsize;
if (!(newcopy = ECPGalloc(slen + 1, lineno)))
return false;
free(str);
}
break;
-
+
case ECPGt_descriptor:
break;
PGresult *results;
PGnotify *notify;
struct variable *var;
- int desc_counter = 0;
+ int desc_counter = 0;
copiedquery = ECPGstrdup(stmt->command, stmt->lineno);
while (var)
{
char *newcopy = NULL;
- const char *tobeinserted;
+ const char *tobeinserted;
char *p;
- bool malloced = FALSE;
- int hostvarl = 0;
+ bool malloced = FALSE;
+ int hostvarl = 0;
tobeinserted = NULL;
-
- /* A descriptor is a special case since it contains many variables but is listed only once. */
+
+ /*
+ * A descriptor is a special case since it contains many variables
+ * but is listed only once.
+ */
if (var->type == ECPGt_descriptor)
{
- /* We create an additional variable list here, so the same logic applies. */
+ /*
+ * We create an additional variable list here, so the same
+ * logic applies.
+ */
struct variable desc_inlist;
struct descriptor *desc;
struct descriptor_item *desc_item;
+
for (desc = all_descriptors; desc; desc = desc->next)
{
if (strcmp(var->pointer, desc->name) == 0)
break;
}
-
+
if (desc == NULL)
{
ECPGraise(stmt->lineno, ECPG_UNKNOWN_DESCRIPTOR, ECPG_SQLSTATE_INVALID_SQL_DESCRIPTOR_NAME, var->pointer);
return false;
}
-
+
desc_counter++;
if (desc->count < 0 || desc->count >= desc_counter)
{
}
if (!ECPGstore_input(stmt->lineno, stmt->force_indicator, &desc_inlist, &tobeinserted, &malloced))
return false;
-
+
break;
}
}
if (!ECPGstore_input(stmt->lineno, stmt->force_indicator, var, &tobeinserted, &malloced))
return false;
}
-
+
if (tobeinserted)
{
/*
- * Now tobeinserted points to an area that is to be inserted at
- * the first %s
+ * Now tobeinserted points to an area that is to be inserted
+ * at the first %s
*/
if (!(newcopy = (char *) ECPGalloc(strlen(copiedquery) + strlen(tobeinserted) + 1, stmt->lineno)))
return false;
if ((p = next_insert(newcopy + hostvarl)) == NULL)
{
/*
- * We have an argument but we dont have the matched up string
- * in the string
+ * We have an argument but we dont have the matched up
+ * string in the string
*/
ECPGraise(stmt->lineno, ECPG_TOO_MANY_ARGUMENTS, ECPG_SQLSTATE_USING_CLAUSE_DOES_NOT_MATCH_PARAMETERS, NULL);
return false;
hostvarl = strlen(newcopy);
/*
- * The strange thing in the second argument is the rest of the
- * string from the old string
+ * The strange thing in the second argument is the rest of
+ * the string from the old string
*/
strcat(newcopy,
copiedquery
}
/*
- * Now everything is safely copied to the newcopy. Lets free the
- * oldcopy and let the copiedquery get the var->value from the
- * newcopy.
+ * Now everything is safely copied to the newcopy. Lets free
+ * the oldcopy and let the copiedquery get the var->value from
+ * the newcopy.
*/
if (malloced)
{
ECPGfree(copiedquery);
copiedquery = newcopy;
}
-
+
if (desc_counter == 0)
- var = var->next;
+ var = var->next;
}
/* Check if there are unmatched things left. */
enum COMPAT_MODE
{
- ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE
+ ECPG_COMPAT_PGSQL = 0, ECPG_COMPAT_INFORMIX, ECPG_COMPAT_INFORMIX_SE
};
#define INFORMIX_MODE(X) ((X) == ECPG_COMPAT_INFORMIX || (X) == ECPG_COMPAT_INFORMIX_SE)
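A minimal sketch, not part of this diff, of how the INFORMIX_MODE() test above is meant to be used: it selects the Informix-compatible error code over the native one, mirroring the ECPGraise() call earlier in this patch. The helper name is illustrative only.

static int
too_many_matches_code(enum COMPAT_MODE compat)
{
	/* pick the Informix-compatible code when running in an Informix mode */
	return INFORMIX_MODE(compat)
		? ECPG_INFORMIX_SUBSELECT_NOT_ONE
		: ECPG_TOO_MANY_MATCHES;
}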
enum ARRAY_TYPE
{
- ECPG_ARRAY_NOT_SET, ECPG_ARRAY_ARRAY, ECPG_ARRAY_VECTOR, ECPG_ARRAY_NONE
+ ECPG_ARRAY_NOT_SET, ECPG_ARRAY_ARRAY, ECPG_ARRAY_VECTOR, ECPG_ARRAY_NONE
};
/* Here are some methods used by the lib. */
/* Returns a pointer to a string containing a simple type name. */
-void ECPGadd_mem (void *ptr, int lineno);
-
-bool ECPGget_data (const PGresult *, int, int, int, enum ECPGttype type,
- enum ECPGttype, char *, char *, long, long, long,
- enum ARRAY_TYPE, enum COMPAT_MODE, bool);
-struct connection *ECPGget_connection (const char *);
-char *ECPGalloc (long, int);
-char *ECPGrealloc (void *, long, int);
-void ECPGfree (void *);
-bool ECPGinit (const struct connection *, const char *, const int);
-char *ECPGstrdup (const char *, int);
-const char *ECPGtype_name (enum ECPGttype);
-unsigned int ECPGDynamicType (Oid);
-void ECPGfree_auto_mem (void);
-void ECPGclear_auto_mem (void);
-
-struct descriptor *ecpggetdescp (int, char *);
+void ECPGadd_mem(void *ptr, int lineno);
+
+bool ECPGget_data(const PGresult *, int, int, int, enum ECPGttype type,
+ enum ECPGttype, char *, char *, long, long, long,
+ enum ARRAY_TYPE, enum COMPAT_MODE, bool);
+struct connection *ECPGget_connection(const char *);
+char *ECPGalloc(long, int);
+char *ECPGrealloc(void *, long, int);
+void ECPGfree(void *);
+bool ECPGinit(const struct connection *, const char *, const int);
+char *ECPGstrdup(const char *, int);
+const char *ECPGtype_name(enum ECPGttype);
+unsigned int ECPGDynamicType(Oid);
+void ECPGfree_auto_mem(void);
+void ECPGclear_auto_mem(void);
+
+struct descriptor *ecpggetdescp(int, char *);
/* A generic varchar type. */
struct ECPGgeneric_varchar
{
- int len;
- char arr[1];
+ int len;
+ char arr[1];
};
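The arr[1] member above is the pre-C99 "struct hack": the real data length lives in len and the structure is over-allocated. A hedged sketch of the allocation pattern follows; the helper and its includes are illustrative and not part of ecpglib.

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

static struct ECPGgeneric_varchar *
make_generic_varchar(const char *src, int n)
{
	struct ECPGgeneric_varchar *v;

	/* over-allocate so that arr[] can hold n bytes of data */
	v = malloc(offsetof(struct ECPGgeneric_varchar, arr) + n);
	if (v == NULL)
		return NULL;
	v->len = n;
	memcpy(v->arr, src, n);
	return v;
}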
/*
struct ECPGtype_information_cache
{
- struct ECPGtype_information_cache *next;
- int oid;
- bool isarray;
+ struct ECPGtype_information_cache *next;
+ int oid;
+ bool isarray;
};
/* structure to store one statement */
struct statement
{
- int lineno;
- char *command;
- struct connection *connection;
- enum COMPAT_MODE compat;
- bool force_indicator;
- struct variable *inlist;
- struct variable *outlist;
+ int lineno;
+ char *command;
+ struct connection *connection;
+ enum COMPAT_MODE compat;
+ bool force_indicator;
+ struct variable *inlist;
+ struct variable *outlist;
};
/* structure to store connections */
struct connection
{
- char *name;
- PGconn *connection;
- bool committed;
- int autocommit;
- struct ECPGtype_information_cache *cache_head;
- struct connection *next;
+ char *name;
+ PGconn *connection;
+ bool committed;
+ int autocommit;
+ struct ECPGtype_information_cache *cache_head;
+ struct connection *next;
};
/* structure to store descriptors */
struct descriptor
{
- char *name;
- PGresult *result;
- struct descriptor *next;
- int count;
- struct descriptor_item *items;
+ char *name;
+ PGresult *result;
+ struct descriptor *next;
+ int count;
+ struct descriptor_item *items;
};
extern struct descriptor *all_descriptors;
struct descriptor_item
{
- int num;
- char *data;
- int indicator;
- int length;
- int precision;
- int scale;
- int type;
- struct descriptor_item *next;
+ int num;
+ char *data;
+ int indicator;
+ int length;
+ int precision;
+ int scale;
+ int type;
+ struct descriptor_item *next;
};
struct variable
{
- enum ECPGttype type;
- void *value;
- void *pointer;
- long varcharsize;
- long arrsize;
- long offset;
- enum ECPGttype ind_type;
- void *ind_value;
- void *ind_pointer;
- long ind_varcharsize;
- long ind_arrsize;
- long ind_offset;
- struct variable *next;
+ enum ECPGttype type;
+ void *value;
+ void *pointer;
+ long varcharsize;
+ long arrsize;
+ long offset;
+ enum ECPGttype ind_type;
+ void *ind_value;
+ void *ind_pointer;
+ long ind_varcharsize;
+ long ind_arrsize;
+ long ind_offset;
+ struct variable *next;
};
-PGresult **ECPGdescriptor_lvalue (int line, const char *descriptor);
+PGresult **ECPGdescriptor_lvalue(int line, const char *descriptor);
-bool ECPGstore_result (const PGresult * results, int act_field,
- const struct statement *stmt, struct variable *var);
-bool ECPGstore_input(const int, const bool, const struct variable *, const char **, bool *);
-void ECPGget_variable(va_list *, enum ECPGttype, struct variable *, bool);
+bool ECPGstore_result(const PGresult *results, int act_field,
+ const struct statement * stmt, struct variable * var);
+bool ECPGstore_input(const int, const bool, const struct variable *, const char **, bool *);
+void ECPGget_variable(va_list *, enum ECPGttype, struct variable *, bool);
/* SQLSTATE values generated or processed by ecpglib (intentionally
* not exported -- users should refer to the codes directly) */
#define ECPG_SQLSTATE_ECPG_INTERNAL_ERROR "YE000"
#define ECPG_SQLSTATE_ECPG_OUT_OF_MEMORY "YE001"
-#endif /* _ECPG_LIB_EXTERN_H */
+#endif /* _ECPG_LIB_EXTERN_H */
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.21 2004/06/27 12:28:40 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/ecpglib/misc.c,v 1.22 2004/08/29 05:06:59 momjian Exp $ */
#define POSTGRES_ECPG_INTERNAL
#include "postgres_fe.h"
}
#ifdef ENABLE_THREAD_SAFETY
-static void ecpg_sqlca_key_destructor(void *arg)
+static void
+ecpg_sqlca_key_destructor(void *arg)
{
- if( arg != NULL )
- free(arg); /* sqlca structure allocated in ECPGget_sqlca */
+ if (arg != NULL)
+ free(arg); /* sqlca structure allocated in
+ * ECPGget_sqlca */
}
-static void ecpg_sqlca_key_init(void)
+static void
+ecpg_sqlca_key_init(void)
{
- pthread_key_create(&sqlca_key, ecpg_sqlca_key_destructor);
+ pthread_key_create(&sqlca_key, ecpg_sqlca_key_destructor);
}
#endif
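For context, a hedged sketch of the per-thread sqlca pattern that the key and destructor above support. This is not the actual ECPGget_sqlca() code; sqlca_key_once (a pthread_once_t), struct sqlca_t, and the standard includes are assumed here.

static struct sqlca_t *
get_thread_sqlca(void)
{
	struct sqlca_t *sqlca;

	pthread_once(&sqlca_key_once, ecpg_sqlca_key_init);
	sqlca = pthread_getspecific(sqlca_key);
	if (sqlca == NULL)
	{
		/* first use in this thread; freed later by the key destructor */
		sqlca = calloc(1, sizeof(struct sqlca_t));
		pthread_setspecific(sqlca_key, sqlca);
	}
	return sqlca;
}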
typedef timestamp dtime_t;
typedef interval intrvl_t;
-#endif /* ndef _ECPG_DATETIME_H */
+#endif /* ndef _ECPG_DATETIME_H */
typedef decimal dec_t;
-#endif /* ndef _ECPG_DECIMAL_H */
+#endif /* ndef _ECPG_DECIMAL_H */
/*
* This file contains stuff needed to be as compatible to Informix as possible.
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg_informix.h,v 1.15 2004/05/10 13:46:06 meskes Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/ecpg_informix.h,v 1.16 2004/08/29 05:06:59 momjian Exp $
*/
#ifndef _ECPG_INFORMIX_H
#define _ECPG_INFORMIX_H
#define SQLNOTFOUND 100
#define ECPG_INFORMIX_NUM_OVERFLOW -1200
-#define ECPG_INFORMIX_NUM_UNDERFLOW -1201
+#define ECPG_INFORMIX_NUM_UNDERFLOW -1201
#define ECPG_INFORMIX_DIVIDE_ZERO -1202
#define ECPG_INFORMIX_BAD_YEAR -1204
#define ECPG_INFORMIX_BAD_MONTH -1205
#define ECPG_INFORMIX_BAD_DAY -1206
#define ECPG_INFORMIX_ENOSHORTDATE -1209
#define ECPG_INFORMIX_DATE_CONVERT -1210
-#define ECPG_INFORMIX_OUT_OF_MEMORY -1211
+#define ECPG_INFORMIX_OUT_OF_MEMORY -1211
#define ECPG_INFORMIX_ENOTDMY -1212
#define ECPG_INFORMIX_BAD_NUMERIC -1213
#define ECPG_INFORMIX_BAD_EXPONENT -1216
#define ECPG_INFORMIX_EXTRA_CHARS -1264
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
extern int rdatestr(date, char *);
-extern void rtoday(date *);
+extern void rtoday(date *);
extern int rjulmdy(date, short *);
extern int rdefmtdate(date *, char *, char *);
extern int rfmtdate(date, char *, char *);
extern int rtypalign(int, int);
extern int rtypmsize(int, int);
extern int rtypwidth(int, int);
-extern void rupshift(char *);
+extern void rupshift(char *);
extern int byleng(char *, int);
extern void ldchar(char *, int, char *);
extern void *ECPG_informix_get_var(int);
/* Informix defines these in decimal.h */
-int decadd(decimal *, decimal *, decimal *);
-int deccmp(decimal *, decimal *);
-void deccopy(decimal *, decimal *);
-int deccvasc(char *, int, decimal *);
-int deccvdbl(double, decimal *);
-int deccvint(int, decimal *);
-int deccvlong(long, decimal *);
-int decdiv(decimal *, decimal *, decimal *);
-int decmul(decimal *, decimal *, decimal *);
-int decsub(decimal *, decimal *, decimal *);
-int dectoasc(decimal *, char *, int, int);
-int dectodbl(decimal *, double *);
-int dectoint(decimal *, int *);
-int dectolong(decimal *, long *);
+int decadd(decimal *, decimal *, decimal *);
+int deccmp(decimal *, decimal *);
+void deccopy(decimal *, decimal *);
+int deccvasc(char *, int, decimal *);
+int deccvdbl(double, decimal *);
+int deccvint(int, decimal *);
+int deccvlong(long, decimal *);
+int decdiv(decimal *, decimal *, decimal *);
+int decmul(decimal *, decimal *, decimal *);
+int decsub(decimal *, decimal *, decimal *);
+int dectoasc(decimal *, char *, int, int);
+int dectodbl(decimal *, double *);
+int dectoint(decimal *, int *);
+int dectolong(decimal *, long *);
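A short usage sketch for the Informix-compatible decimal routines declared above; the values are arbitrary and the 0-means-success return convention is assumed.

void
decimal_add_demo(void)
{
	decimal a, b, sum;
	char	buf[64];

	/* 12.50 + 0.75 = 13.25 */
	if (deccvasc("12.50", 5, &a) == 0 &&
		deccvasc("0.75", 4, &b) == 0 &&
		decadd(&a, &b, &sum) == 0)
		dectoasc(&sum, buf, sizeof(buf), 2);	/* expect "13.25" */
}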
/* Informix defines these in datetime.h */
-extern void dtcurrent(timestamp *);
-extern int dtcvasc(char *, timestamp *);
-extern int dtsub(timestamp *, timestamp *, interval *);
+extern void dtcurrent(timestamp *);
+extern int dtcvasc(char *, timestamp *);
+extern int dtsub(timestamp *, timestamp *, interval *);
extern int dttoasc(timestamp *, char *);
extern int dttofmtasc(timestamp *, char *, int, char *);
extern int intoasc(interval *, char *);
}
#endif
-#endif /* ndef _ECPG_INFORMIX_H */
+#endif /* ndef _ECPG_INFORMIX_H */
typedef long date;
+
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
#endif
long month; /* months and years, after time for
* alignment */
-} interval;
+} interval;
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
* NUMERIC_NAN */
NumericDigit *buf; /* start of alloc'd space for digits[] */
NumericDigit *digits; /* decimal digits */
-} numeric;
+} numeric;
typedef struct
{
} decimal;
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
-
-numeric *PGTYPESnumeric_new(void);
+
+ numeric * PGTYPESnumeric_new(void);
void PGTYPESnumeric_free(numeric *);
numeric *PGTYPESnumeric_from_asc(char *, char **);
char *PGTYPESnumeric_to_asc(numeric *, int);
#endif
#ifdef __cplusplus
-extern "C"
+extern "C"
{
#endif
*
* Copyright (c) 2000, Christof Petig
*
- * $PostgreSQL: pgsql/src/interfaces/ecpg/include/sql3types.h,v 1.9 2003/11/29 19:52:08 pgsql Exp $
+ * $PostgreSQL: pgsql/src/interfaces/ecpg/include/sql3types.h,v 1.10 2004/08/29 05:06:59 momjian Exp $
*/
/* chapter 13.1 table 2: Codes used for SQL data types in Dynamic SQL */
* standard) */
};
-#endif /* !_ECPG_SQL3TYPES_H */
+#endif /* !_ECPG_SQL3TYPES_H */
#define CLVCHARPTRTYPE 124
#define CTYPEMAX 25
-#endif /* ndef ECPG_SQLTYPES_H */
+#endif /* ndef ECPG_SQLTYPES_H */
PGTYPESdate_dayofweek(date dDate)
{
/*
- * Sunday: 0 Monday: 1 Tuesday: 2 Wednesday: 3
- * Thursday: 4 Friday: 5 Saturday: 6
+ * Sunday: 0 Monday: 1 Tuesday: 2 Wednesday: 3 Thursday:
+ * 4 Friday: 5 Saturday: 6
*/
return (int) (dDate + date2j(2000, 1, 1) + 1) % 7;
}
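A hedged usage sketch for the day-of-week mapping documented above; PGTYPESdate_from_asc() from the pgtypes date API is assumed to be available, and <stdio.h> is needed for the printout.

void
dayofweek_demo(void)
{
	date	d = PGTYPESdate_from_asc("2000-01-01", NULL);	/* a Saturday */

	printf("%d\n", PGTYPESdate_dayofweek(d));	/* prints 6 */
}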
int *, int *, int *, int *);
int
-PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp *d,
+PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *tz)
} /* interval2tm() */
static int
-tm2interval(struct tm * tm, fsec_t fsec, interval *span)
+tm2interval(struct tm * tm, fsec_t fsec, interval * span)
{
span->month = ((tm->tm_year * 12) + tm->tm_mon);
#ifdef HAVE_INT64_TIMESTAMP
}
char *
-PGTYPESinterval_to_asc(interval *span)
+PGTYPESinterval_to_asc(interval * span)
{
struct tm tt,
*tm = &tt;
}
int
-PGTYPESinterval_copy(interval *intvlsrc, interval *intrcldest)
+PGTYPESinterval_copy(interval * intvlsrc, interval * intrcldest)
{
intrcldest->time = intvlsrc->time;
intrcldest->month = intvlsrc->month;
* ----------
*/
static int
-apply_typmod(numeric *var, long typmod)
+apply_typmod(numeric * var, long typmod)
{
int precision;
int scale;
* ----------
*/
static int
-alloc_var(numeric *var, int ndigits)
+alloc_var(numeric * var, int ndigits)
{
digitbuf_free(var->buf);
var->buf = digitbuf_alloc(ndigits + 1);
* ----------
*/
static int
-set_var_from_str(char *str, char **ptr, numeric *dest)
+set_var_from_str(char *str, char **ptr, numeric * dest)
{
bool have_dp = FALSE;
int i = 0;
* ----------
*/
static char *
-get_str_from_var(numeric *var, int dscale)
+get_str_from_var(numeric * var, int dscale)
{
char *str;
char *cp;
}
char *
-PGTYPESnumeric_to_asc(numeric *num, int dscale)
+PGTYPESnumeric_to_asc(numeric * num, int dscale)
{
if (dscale < 0)
dscale = num->dscale;
* ----------
*/
static void
-zero_var(numeric *var)
+zero_var(numeric * var)
{
digitbuf_free(var->buf);
var->buf = NULL;
}
void
-PGTYPESnumeric_free(numeric *var)
+PGTYPESnumeric_free(numeric * var)
{
digitbuf_free(var->buf);
free(var);
* ----------
*/
static int
-cmp_abs(numeric *var1, numeric *var2)
+cmp_abs(numeric * var1, numeric * var2)
{
int i1 = 0;
int i2 = 0;
* ----------
*/
static int
-add_abs(numeric *var1, numeric *var2, numeric *result)
+add_abs(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* ----------
*/
static int
-sub_abs(numeric *var1, numeric *var2, numeric *result)
+sub_abs(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* ----------
*/
int
-PGTYPESnumeric_add(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_add(numeric * var1, numeric * var2, numeric * result)
{
/*
* Decide on the signs of the two variables what to do
* ----------
*/
int
-PGTYPESnumeric_sub(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_sub(numeric * var1, numeric * var2, numeric * result)
{
/*
* Decide on the signs of the two variables what to do
* ----------
*/
int
-PGTYPESnumeric_mul(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_mul(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_buf;
NumericDigit *res_digits;
* Note that this must be called before div_var.
*/
static int
-select_div_scale(numeric *var1, numeric *var2, int *rscale)
+select_div_scale(numeric * var1, numeric * var2, int *rscale)
{
int weight1,
weight2,
}
int
-PGTYPESnumeric_div(numeric *var1, numeric *var2, numeric *result)
+PGTYPESnumeric_div(numeric * var1, numeric * var2, numeric * result)
{
NumericDigit *res_digits;
int res_ndigits;
int
-PGTYPESnumeric_cmp(numeric *var1, numeric *var2)
+PGTYPESnumeric_cmp(numeric * var1, numeric * var2)
{
/* use cmp_abs function to calculate the result */
}
int
-PGTYPESnumeric_from_int(signed int int_val, numeric *var)
+PGTYPESnumeric_from_int(signed int int_val, numeric * var)
{
/* implicit conversion */
signed long int long_int = int_val;
}
int
-PGTYPESnumeric_from_long(signed long int long_val, numeric *var)
+PGTYPESnumeric_from_long(signed long int long_val, numeric * var)
{
/* calculate the size of the long int number */
/* a number n needs log_10 n digits */
{
size++;
reach_limit *= 10;
- } while ((reach_limit - 1) < abs_long_val && reach_limit <= LONG_MAX/10);
+ } while ((reach_limit - 1) < abs_long_val && reach_limit <= LONG_MAX / 10);
- if (reach_limit > LONG_MAX/10)
+ if (reach_limit > LONG_MAX / 10)
{
/* add the first digit and a .0 */
size += 2;
}
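The do/while above simply counts decimal digits while guarding reach_limit against overflow; the if branch then compensates when that guard cuts the loop short. A standalone illustration of the same counting loop (not from this patch, <limits.h> assumed): for abs_long_val = 12345 it stops with reach_limit = 100000 and size = 5.

static int
count_digits(long abs_long_val)
{
	int		size = 0;
	long	reach_limit = 1;

	do
	{
		size++;
		reach_limit *= 10;
	} while ((reach_limit - 1) < abs_long_val && reach_limit <= LONG_MAX / 10);

	return size;
}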
int
-PGTYPESnumeric_copy(numeric *src, numeric *dst)
+PGTYPESnumeric_copy(numeric * src, numeric * dst)
{
int i;
- if ( dst == NULL ) return -1;
+ if (dst == NULL)
+ return -1;
zero_var(dst);
dst->weight = src->weight;
}
int
-PGTYPESnumeric_from_double(double d, numeric *dst)
+PGTYPESnumeric_from_double(double d, numeric * dst)
{
char buffer[100];
numeric *tmp;
}
static int
-numericvar_to_double_no_overflow(numeric *var, double *dp)
+numericvar_to_double_no_overflow(numeric * var, double *dp)
{
char *tmp;
double val;
}
int
-PGTYPESnumeric_to_double(numeric *nv, double *dp)
+PGTYPESnumeric_to_double(numeric * nv, double *dp)
{
double tmp;
int i;
}
int
-PGTYPESnumeric_to_int(numeric *nv, int *ip)
+PGTYPESnumeric_to_int(numeric * nv, int *ip)
{
long l;
int i;
}
int
-PGTYPESnumeric_to_long(numeric *nv, long *lp)
+PGTYPESnumeric_to_long(numeric * nv, long *lp)
{
int i;
long l = 0;
}
int
-PGTYPESnumeric_to_decimal(numeric *src, decimal * dst)
+PGTYPESnumeric_to_decimal(numeric * src, decimal * dst)
{
int i;
}
int
-PGTYPESnumeric_from_decimal(decimal * src, numeric *dst)
+PGTYPESnumeric_from_decimal(decimal * src, numeric * dst)
{
int i;
* Returns -1 on failure (overflow).
*/
int
-tm2timestamp(struct tm * tm, fsec_t fsec, int *tzp, timestamp *result)
+tm2timestamp(struct tm * tm, fsec_t fsec, int *tzp, timestamp * result)
{
#ifdef HAVE_INT64_TIMESTAMP
- int dDate;
+ int dDate;
int64 time;
#else
- double dDate, time;
+ double dDate,
+ time;
#endif
/* Julian day routines are not correct for negative Julian days */
timestamp2tm(timestamp dt, int *tzp, struct tm * tm, fsec_t *fsec, char **tzn)
{
#ifdef HAVE_INT64_TIMESTAMP
- int dDate, date0;
+ int dDate,
+ date0;
int64 time;
#else
- double dDate, date0;
+ double dDate,
+ date0;
double time;
#endif
time_t utime;
/* AdjustTimestampForTypmod(&result, typmod); */
- /* Since it's difficult to test for noresult, make sure errno is 0 if no error occured. */
+ /*
+ * Since it's difficult to test for noresult, make sure errno is 0 if
+ * no error occurred.
+ */
errno = 0;
return result;
}
}
void
-PGTYPEStimestamp_current(timestamp *ts)
+PGTYPEStimestamp_current(timestamp * ts)
{
struct tm tm;
}
static int
-dttofmtasc_replace(timestamp *ts, date dDate, int dow, struct tm * tm,
+dttofmtasc_replace(timestamp * ts, date dDate, int dow, struct tm * tm,
char *output, int *pstr_len, char *fmtstr)
{
union un_fmt_comb replace_val;
int
-PGTYPEStimestamp_fmt_asc(timestamp *ts, char *output, int str_len, char *fmtstr)
+PGTYPEStimestamp_fmt_asc(timestamp * ts, char *output, int str_len, char *fmtstr)
{
struct tm tm;
fsec_t fsec;
}
int
-PGTYPEStimestamp_sub(timestamp *ts1, timestamp *ts2, interval *iv)
+PGTYPEStimestamp_sub(timestamp * ts1, timestamp * ts2, interval * iv)
{
if (TIMESTAMP_NOT_FINITE(*ts1) || TIMESTAMP_NOT_FINITE(*ts2))
return PGTYPES_TS_ERR_EINFTIME;
}
int
-PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp *d)
+PGTYPEStimestamp_defmt_asc(char *str, char *fmt, timestamp * d)
{
int year,
month,
descriptor_variable(const char *name, int input)
{
static char descriptor_names[2][MAX_DESCRIPTOR_NAMELEN];
- static const struct ECPGtype descriptor_type = { ECPGt_descriptor, 0 };
+ static const struct ECPGtype descriptor_type = {ECPGt_descriptor, 0};
static const struct variable varspace[2] = {
- { descriptor_names[0], (struct ECPGtype *) & descriptor_type, 0, NULL },
- { descriptor_names[1], (struct ECPGtype *) & descriptor_type, 0, NULL }
+ {descriptor_names[0], (struct ECPGtype *) & descriptor_type, 0, NULL},
+ {descriptor_names[1], (struct ECPGtype *) & descriptor_type, 0, NULL}
};
strncpy(descriptor_names[input], name, MAX_DESCRIPTOR_NAMELEN);
-/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.89 2004/07/20 18:06:41 meskes Exp $ */
+/* $PostgreSQL: pgsql/src/interfaces/ecpg/preproc/ecpg.c,v 1.90 2004/08/29 05:07:00 momjian Exp $ */
/* New main for ecpg, the PostgreSQL embedded SQL precompiler. */
/* (C) Michael Meskes Feb 5th, 1998 */
out_option = 0;
struct _include_path *ip;
const char *progname;
- char my_exec_path[MAXPGPATH];
- char include_path[MAXPGPATH];
-
+ char my_exec_path[MAXPGPATH];
+ char include_path[MAXPGPATH];
+
progname = get_progname(argv[0]);
if (argc > 1)
case 'C':
if (strncmp(optarg, "INFORMIX", strlen("INFORMIX")) == 0)
{
- char pkginclude_path[MAXPGPATH];
- char informix_path[MAXPGPATH];
-
+ char pkginclude_path[MAXPGPATH];
+ char informix_path[MAXPGPATH];
+
compat = (strcmp(optarg, "INFORMIX") == 0) ? ECPG_COMPAT_INFORMIX : ECPG_COMPAT_INFORMIX_SE;
/* system_includes = true; */
add_preprocessor_define("dec_t=decimal");
ptr2ext[0] = '.';
ptr2ext[1] = 'p';
ptr2ext[2] = 'g';
- ptr2ext[3] = (header_mode == true)? 'h' : 'c';
+ ptr2ext[3] = (header_mode == true) ? 'h' : 'c';
ptr2ext[4] = '\0';
}
ptr2ext = strrchr(output_filename, '.');
/* make extension = .c resp. .h */
- ptr2ext[1] = (header_mode == true)? 'h' : 'c';
+ ptr2ext[1] = (header_mode == true) ? 'h' : 'c';
ptr2ext[2] = '\0';
yyout = fopen(output_filename, PG_BINARY_W);
/* we need several includes */
/* but not if we are in header mode */
fprintf(yyout, "/* Processed by ecpg (%d.%d.%d) */\n", MAJOR_VERSION, MINOR_VERSION, PATCHLEVEL);
-
+
if (header_mode == false)
{
fprintf(yyout, "/* These include files are added by the preprocessor */\n#include \n#include \n#include \n#include \n");
}
fprintf(yyout, "#line 1 \"%s\"\n", input_filename);
-
+
/* and parse the source */
yyparse();
/* check if all cursors were indeed opened */
for (ptr = cur; ptr != NULL;)
{
- char errortext[128];
-
+ char errortext[128];
+
if (!(ptr->opened))
{
- /* Does not really make sense to declare a cursor but not open it */
+ /*
+ * Does not really make sense to declare a cursor
+ * but not open it
+ */
snprintf(errortext, sizeof(errortext), "cursor `%s´ has been declared but not opened\n", ptr->name);
mmerror(PARSE_ERROR, ET_WARNING, errortext);
}
ptr = ptr->next;
}
-
+
if (yyin != NULL && yyin != stdin)
fclose(yyin);
if (out_option == 0 && yyout != stdout)
extern void yyerror(char *);
extern void *mm_alloc(size_t), *mm_realloc(void *, size_t);
extern char *mm_strdup(const char *);
-extern void mmerror(int, enum errortype, char *, ...);
+extern void mmerror(int, enum errortype, char *,...);
extern ScanKeyword *ScanECPGKeywordLookup(char *);
extern ScanKeyword *ScanCKeywordLookup(char *);
extern void output_get_descr_header(char *);
type = ECPGmake_struct_type(rm->type->u.members, rm->type->type, rm->type->struct_sizeof);
break;
case ECPGt_array:
- /* if this array does contain a struct again, we have to create the struct too */
+
+ /*
+ * if this array does contain a struct again, we have to
+ * create the struct too
+ */
if (rm->type->u.element->type == ECPGt_struct)
type = ECPGmake_struct_type(rm->type->u.element->u.members, rm->type->u.element->type, rm->type->u.element->struct_sizeof);
else
char *name;
char *command;
char *connection;
- bool opened;
+ bool opened;
struct arguments *argsinsert;
struct arguments *argsresult;
struct cursor *next;
struct typedefs
{
- char *name;
- struct this_type *type;
- struct ECPGstruct_member *struct_member_list;
- int brace_level;
- struct typedefs *next;
+ char *name;
+ struct this_type *type;
+ struct ECPGstruct_member *struct_member_list;
+ int brace_level;
+ struct typedefs *next;
};
struct _defines
{
- char *old;
- char *new;
- int pertinent;
- void *used;
+ char *old;
+ char *new;
+ int pertinent;
+ void *used;
struct _defines *next;
};
/* This is a linked list of the variable names and types. */
struct variable
{
- char *name;
+ char *name;
struct ECPGtype *type;
- int brace_level;
+ int brace_level;
struct variable *next;
};
else
prev->next = p->next;
- if (p->type->type_enum == ECPGt_struct || p->type->type_enum == ECPGt_union)
+ if (p->type->type_enum == ECPGt_struct || p->type->type_enum == ECPGt_union)
free(p->struct_member_list);
free(p->type);
free(p->name);
argsresult = NULL;
}
-/* Insert a new variable into our request list.
+/* Insert a new variable into our request list.
* Note: The list is dumped from the end,
* so we have to add new entries at the beginning */
void
* given bounds
*/
if (atoi(*dimension) < 0 && !type_definition)
+
/*
- * do not change this for typedefs
- * since it will be changed later on when the variable is defined
+ * do not change this for typedefs since it will be
+ * changed later on when the variable is defined
*/
*length = make_str("1");
else if (atoi(*dimension) == 0)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.283 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-connect.c,v 1.284 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
conn->status = CONNECTION_BAD;
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("invalid sslmode value: \"%s\"\n"),
+ libpq_gettext("invalid sslmode value: \"%s\"\n"),
conn->sslmode);
return false;
}
struct addrinfo hint;
const char *node = NULL;
int ret;
+
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
static pthread_once_t check_sigpipe_once = PTHREAD_ONCE_INIT;
}
/*
- * Wait for the postmaster to close the connection, which indicates that
- * it's processed the request. Without this delay, we might issue another
- * command only to find that our cancel zaps that command instead of the
- * one we thought we were canceling. Note we don't actually expect this
- * read to obtain any data, we are just waiting for EOF to be signaled.
+ * Wait for the postmaster to close the connection, which indicates
+ * that it's processed the request. Without this delay, we might
+ * issue another command only to find that our cancel zaps that
+ * command instead of the one we thought we were canceling. Note we
+ * don't actually expect this read to obtain any data, we are just
+ * waiting for EOF to be signaled.
*/
retry5:
if (recv(tmpsock, (char *) &crp, 1, 0) < 0)
service = getenv("PGSERVICE");
/*
- * This could be used by any application so we can't use the binary
- * location to find our config files.
- */
+ * This could be used by any application so we can't use the binary
+ * location to find our config files.
+ */
snprintf(serviceFile, MAXPGPATH, "%s/pg_service.conf",
- getenv("PGSYSCONFDIR") ? getenv("PGSYSCONFDIR") : SYSCONFDIR);
+ getenv("PGSYSCONFDIR") ? getenv("PGSYSCONFDIR") : SYSCONFDIR);
if (service != NULL)
{
port = DEF_PGPORT_STR;
/*
- * Look for it in the home dir.
- * We don't use get_home_path() so we don't pull path.c into our library.
+ * Look for it in the home dir. We don't use get_home_path() so we
+ * don't pull path.c into our library.
*/
if (!(home = getenv(HOMEDIR)))
return NULL;
-
+
pgpassfile = malloc(strlen(home) + 1 + strlen(PGPASSFILE) + 1);
if (!pgpassfile)
{
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
static pthread_mutex_t singlethread_lock = PTHREAD_MUTEX_INITIALIZER;
+
#else
static pthread_mutex_t singlethread_lock = NULL;
static long mutex_initlock = 0;
- if (singlethread_lock == NULL) {
- while(InterlockedExchange(&mutex_initlock, 1) == 1)
- /* loop, another thread own the lock */ ;
+ if (singlethread_lock == NULL)
+ {
+ while (InterlockedExchange(&mutex_initlock, 1) == 1)
+ /* loop, another thread owns the lock */ ;
if (singlethread_lock == NULL)
pthread_mutex_init(&singlethread_lock, NULL);
- InterlockedExchange(&mutex_initlock,0);
+ InterlockedExchange(&mutex_initlock, 0);
}
#endif
if (acquire)
g_threadlock = default_threadlock;
return prev;
}
-
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.160 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-exec.c,v 1.161 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static bool PQsendQueryStart(PGconn *conn);
static int PQsendQueryGuts(PGconn *conn,
- const char *command,
- const char *stmtName,
- int nParams,
- const Oid *paramTypes,
- const char *const * paramValues,
- const int *paramLengths,
- const int *paramFormats,
- int resultFormat);
+ const char *command,
+ const char *stmtName,
+ int nParams,
+ const Oid *paramTypes,
+ const char *const * paramValues,
+ const int *paramLengths,
+ const int *paramFormats,
+ int resultFormat);
static void parseInput(PGconn *conn);
static bool PQexecStart(PGconn *conn);
static PGresult *PQexecFinish(PGconn *conn);
cnt = sscanf(value, "%d.%d.%d", &vmaj, &vmin, &vrev);
if (cnt < 2)
- conn->sversion = 0; /* unknown */
+ conn->sversion = 0; /* unknown */
else
{
if (cnt == 2)
if (!stmtName)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("statement name is a null pointer\n"));
+ libpq_gettext("statement name is a null pointer\n"));
return 0;
}
return PQsendQueryGuts(conn,
- NULL, /* no command to parse */
+ NULL, /* no command to parse */
stmtName,
nParams,
- NULL, /* no param types */
+ NULL, /* no param types */
paramValues,
paramLengths,
paramFormats,
if (PG_PROTOCOL_MAJOR(conn->pversion) < 3)
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("function requires at least protocol version 3.0\n"));
+ libpq_gettext("function requires at least protocol version 3.0\n"));
return 0;
}
/*
- * We will send Parse (if needed), Bind, Describe Portal, Execute, Sync,
- * using specified statement name and the unnamed portal.
+ * We will send Parse (if needed), Bind, Describe Portal, Execute,
+ * Sync, using specified statement name and the unnamed portal.
*/
if (command)
pqPutMsgEnd(conn) < 0)
return -1;
}
+
/*
* If we sent the COPY command in extended-query mode, we must
* issue a Sync as well.
{
/* Ooops, no way to do this in 2.0 */
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("function requires at least protocol version 3.0\n"));
+ libpq_gettext("function requires at least protocol version 3.0\n"));
return -1;
}
else
* downcasing in the frontend might follow different locale rules than
* downcasing in the backend...
*
- * Returns -1 if no match. In the present backend it is also possible
+ * Returns -1 if no match. In the present backend it is also possible
* to have multiple matches, in which case the first one is found.
*/
int
return -1;
/*
- * Note: it is correct to reject a zero-length input string; the proper
- * input to match a zero-length field name would be "".
+ * Note: it is correct to reject a zero-length input string; the
+ * proper input to match a zero-length field name would be "".
*/
if (field_name == NULL ||
field_name[0] == '\0' ||
/*
* Note: this code will not reject partially quoted strings, eg
- * foo"BAR"foo will become fooBARfoo when it probably ought to be
- * an error condition.
+ * foo"BAR"foo will become fooBARfoo when it probably ought to be an
+ * error condition.
*/
field_case = strdup(field_name);
if (field_case == NULL)
optr = field_case;
for (iptr = field_case; *iptr; iptr++)
{
- char c = *iptr;
+ char c = *iptr;
if (in_quotes)
{
*optr++ = c;
}
else if (c == '"')
- {
in_quotes = true;
- }
else
{
c = pg_tolower((unsigned char) c);
int
PQsetnonblocking(PGconn *conn, int arg)
{
- bool barg;
+ bool barg;
if (!conn || conn->status == CONNECTION_BAD)
return -1;
* '\'' == ASCII 39 == \'
* '\\' == ASCII 92 == \\\\
* anything < 0x20, or > 0x7e ---> \\ooo
- * (where ooo is an octal expression)
+ * (where ooo is an octal expression)
*/
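A usage sketch for the escaping rules just listed (the input bytes are illustrative). The result is a NUL-terminated, SQL-safe string in which bytes outside the printable range appear in the \\ooo octal form described above, and it must be released with PQfreemem().

void
escape_demo(void)
{
	const unsigned char raw[] = {'a', '\'', 0, 0x80};
	size_t	esclen;
	unsigned char *esc = PQescapeBytea(raw, sizeof(raw), &esclen);

	if (esc != NULL)
		PQfreemem(esc);
}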
unsigned char *
PQescapeBytea(const unsigned char *bintext, size_t binlen, size_t *bytealen)
return NULL;
strtextlen = strlen(strtext);
+
/*
* Length of input is max length of output, but add one to avoid
* unportable malloc(0) if input is zero-length.
if (buffer == NULL)
return NULL;
- for (i = j = 0; i < strtextlen; )
+ for (i = j = 0; i < strtextlen;)
{
switch (strtext[i])
{
(ISOCTDIGIT(strtext[i + 1])) &&
(ISOCTDIGIT(strtext[i + 2])))
{
- int byte;
+ int byte;
byte = OCTVAL(strtext[i++]);
byte = (byte << 3) + OCTVAL(strtext[i++]);
buffer[j++] = byte;
}
}
+
/*
- * Note: if we see '\' followed by something that isn't
- * a recognized escape sequence, we loop around having
- * done nothing except advance i. Therefore the something
- * will be emitted as ordinary data on the next cycle.
- * Corner case: '\' at end of string will just be discarded.
+ * Note: if we see '\' followed by something that isn't a
+ * recognized escape sequence, we loop around having done
+ * nothing except advance i. Therefore the something will
+ * be emitted as ordinary data on the next cycle. Corner
+ * case: '\' at end of string will just be discarded.
*/
break;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.50 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-lobj.c,v 1.51 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (close(fd))
{
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("error while writing to file \"%s\"\n"),
+ libpq_gettext("error while writing to file \"%s\"\n"),
filename);
return -1;
}
MemSet((char *) lobjfuncs, 0, sizeof(PGlobjfuncs));
/*
- * Execute the query to get all the functions at once. In 7.3 and later
- * we need to be schema-safe.
+ * Execute the query to get all the functions at once. In 7.3 and
+ * later we need to be schema-safe.
*/
if (conn->sversion >= 70300)
query = "select proname, oid from pg_catalog.pg_proc "
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.109 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-misc.c,v 1.110 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* There are scenarios in which we can't send data because the
- * communications channel is full, but we cannot expect the server
- * to clear the channel eventually because it's blocked trying to
- * send data to us. (This can happen when we are sending a large
- * amount of COPY data, and the server has generated lots of
- * NOTICE responses.) To avoid a deadlock situation, we must be
- * prepared to accept and buffer incoming data before we try
- * again. Furthermore, it is possible that such incoming data
- * might not arrive until after we've gone to sleep. Therefore,
- * we wait for either read ready or write ready.
+ * communications channel is full, but we cannot expect the
+ * server to clear the channel eventually because it's blocked
+ * trying to send data to us. (This can happen when we are
+ * sending a large amount of COPY data, and the server has
+ * generated lots of NOTICE responses.) To avoid a deadlock
+ * situation, we must be prepared to accept and buffer
+ * incoming data before we try again. Furthermore, it is
+ * possible that such incoming data might not arrive until
+ * after we've gone to sleep. Therefore, we wait for either
+ * read ready or write ready.
*/
if (pqReadData(conn) < 0)
{
* didn't really belong there.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.53 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-print.c,v 1.54 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
int total_line_length = 0;
int usePipe = 0;
char *pagerenv;
+
#if !defined(ENABLE_THREAD_SAFETY) && !defined(WIN32)
pqsigfunc oldsigpipehandler = NULL;
#endif
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.12 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol2.c,v 1.13 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* must use begin/commit in case autocommit is off by
* default in a 7.3 server.
*
- * Note: version() exists in all
- * protocol-2.0-supporting backends. In 7.3 it would
- * be safer to write pg_catalog.version(), but we can't
- * do that without causing problems on older versions.
+ * Note: version() exists in all protocol-2.0-supporting
+ * backends. In 7.3 it would be safer to write
+ * pg_catalog.version(), but we can't do that without
+ * causing problems on older versions.
*/
if (!PQsendQuery(conn, "begin; select version(); end"))
goto error_return;
else
{
/*
- * Error: presumably function not
- * available, so use PGCLIENTENCODING or
- * SQL_ASCII as the fallback.
+ * Error: presumably function not available,
+ * so use PGCLIENTENCODING or SQL_ASCII as the
+ * fallback.
*/
val = getenv("PGCLIENTENCODING");
if (val && *val)
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.14 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-protocol3.c,v 1.15 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
}
break;
case 'n': /* No Data */
+
/*
* NoData indicates that we will not be seeing a
* RowDescription message because the statement or
- * portal inquired about doesn't return rows.
- * Set up a COMMAND_OK result, instead of TUPLES_OK.
+ * portal inquired about doesn't return rows. Set up a
+ * COMMAND_OK result, instead of TUPLES_OK.
*/
if (conn->result == NULL)
conn->result = PQmakeEmptyPGresult(conn,
- PGRES_COMMAND_OK);
+ PGRES_COMMAND_OK);
break;
case 'D': /* Data Row */
if (conn->result != NULL &&
if (pqPutMsgStart('c', false, conn) < 0 ||
pqPutMsgEnd(conn) < 0)
return 1;
+
/*
* If we sent the COPY command in extended-query mode, we must
* issue a Sync as well.
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.48 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/fe-secure.c,v 1.49 2004/08/29 05:07:00 momjian Exp $
*
* NOTES
* The client *requires* a valid server certificate. Since
#endif
#ifdef USE_SSL
-bool pq_initssllib = true;
+bool pq_initssllib = true;
static SSL_CTX *SSL_context = NULL;
#endif
!SSL_set_app_data(conn->ssl, conn) ||
!SSL_set_fd(conn->ssl, conn->sock))
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not establish SSL connection: %s\n"),
err);
}
case SSL_ERROR_SSL:
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL error: %s\n"), err);
+ libpq_gettext("SSL error: %s\n"), err);
SSLerrfree(err);
}
/* fall through */
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code\n"));
+ libpq_gettext("unrecognized SSL error code\n"));
n = -1;
break;
}
}
case SSL_ERROR_SSL:
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL error: %s\n"), err);
+ libpq_gettext("SSL error: %s\n"), err);
SSLerrfree(err);
}
/* fall through */
break;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code\n"));
+ libpq_gettext("unrecognized SSL error code\n"));
n = -1;
break;
}
struct hostent hpstr;
char buf[BUFSIZ];
int herrno = 0;
-
+
/*
- * Currently, pqGethostbyname() is used only on platforms that
- * don't have getaddrinfo(). If you enable this function,
- * you should convert the pqGethostbyname() function call to
- * use getaddrinfo().
+ * Currently, pqGethostbyname() is used only on platforms that
+ * don't have getaddrinfo(). If you enable this function, you
+ * should convert the pqGethostbyname() function call to use
+ * getaddrinfo().
*/
pqGethostbyname(conn->peer_cn, &hpstr, buf, sizeof(buf),
&h, &herrno);
load_dh_file(int keylength)
{
#ifdef WIN32
- return NULL;
+ return NULL;
#else
char pwdbuf[BUFSIZ];
struct passwd pwdstr;
client_cert_cb(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
{
#ifdef WIN32
- return 0;
+ return 0;
#else
char pwdbuf[BUFSIZ];
struct passwd pwdstr;
}
if (PEM_read_X509(fp, x509, NULL, NULL) == NULL)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not read certificate (%s): %s\n"),
fnbuf, err);
}
if (PEM_read_PrivateKey(fp, pkey, cb, NULL) == NULL)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not read private key (%s): %s\n"),
fnbuf, err);
/* verify that the cert and key go together */
if (!X509_check_private_key(*x509, *pkey))
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("certificate/private key mismatch (%s): %s\n"),
fnbuf, err);
static unsigned long
pq_threadidcallback(void)
{
- return (unsigned long)pthread_self();
+ return (unsigned long) pthread_self();
}
static pthread_mutex_t *pq_lockarray;
static void
pq_lockingcallback(int mode, int n, const char *file, int line)
{
- if (mode & CRYPTO_LOCK) {
+ if (mode & CRYPTO_LOCK)
pthread_mutex_lock(&pq_lockarray[n]);
- } else {
+ else
pthread_mutex_unlock(&pq_lockarray[n]);
- }
}
-
-#endif /* ENABLE_THREAD_SAFETY */
+#endif /* ENABLE_THREAD_SAFETY */
static int
init_ssl_system(PGconn *conn)
{
#ifdef ENABLE_THREAD_SAFETY
#ifndef WIN32
- static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
+ static pthread_mutex_t init_mutex = PTHREAD_MUTEX_INITIALIZER;
+
#else
static pthread_mutex_t init_mutex = NULL;
static long mutex_initlock = 0;
- if (init_mutex == NULL) {
- while(InterlockedExchange(&mutex_initlock, 1) == 1)
- /* loop, another thread own the lock */ ;
+ if (init_mutex == NULL)
+ {
+ while (InterlockedExchange(&mutex_initlock, 1) == 1)
+ /* loop, another thread owns the lock */ ;
if (init_mutex == NULL)
pthread_mutex_init(&init_mutex, NULL);
- InterlockedExchange(&mutex_initlock,0);
+ InterlockedExchange(&mutex_initlock, 0);
}
#endif
pthread_mutex_lock(&init_mutex);
-
- if (pq_initssllib && pq_lockarray == NULL) {
- int i;
+
+ if (pq_initssllib && pq_lockarray == NULL)
+ {
+ int i;
+
CRYPTO_set_id_callback(pq_threadidcallback);
- pq_lockarray = malloc(sizeof(pthread_mutex_t)*CRYPTO_num_locks());
- if (!pq_lockarray) {
+ pq_lockarray = malloc(sizeof(pthread_mutex_t) * CRYPTO_num_locks());
+ if (!pq_lockarray)
+ {
pthread_mutex_unlock(&init_mutex);
return -1;
}
- for (i=0;i<CRYPTO_num_locks();i++)
+ for (i = 0; i < CRYPTO_num_locks(); i++)
pthread_mutex_init(&pq_lockarray[i], NULL);
CRYPTO_set_locking_callback(pq_lockingcallback);
#endif
if (!SSL_context)
{
- if (pq_initssllib) {
+ if (pq_initssllib)
+ {
SSL_library_init();
SSL_load_error_strings();
}
SSL_context = SSL_CTX_new(TLSv1_method());
if (!SSL_context)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not create SSL context: %s\n"),
err);
#endif
return 0;
}
+
/*
* Initialize global SSL context.
*/
char fnbuf[2048];
#endif
- if(init_ssl_system(conn))
+ if (init_ssl_system(conn))
return -1;
#ifndef WIN32
}
if (!SSL_CTX_load_verify_locations(SSL_context, fnbuf, 0))
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("could not read root certificate list (%s): %s\n"),
fnbuf, err);
}
case SSL_ERROR_SSL:
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("SSL error: %s\n"), err);
+ libpq_gettext("SSL error: %s\n"), err);
SSLerrfree(err);
close_SSL(conn);
return PGRES_POLLING_FAILED;
default:
printfPQExpBuffer(&conn->errorMessage,
- libpq_gettext("unrecognized SSL error code\n"));
+ libpq_gettext("unrecognized SSL error code\n"));
close_SSL(conn);
return PGRES_POLLING_FAILED;
}
conn->peer = SSL_get_peer_certificate(conn->ssl);
if (conn->peer == NULL)
{
- char *err = SSLerrmessage();
+ char *err = SSLerrmessage();
+
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("certificate could not be obtained: %s\n"),
err);
* want to return NULL ever.
*/
static char ssl_nomem[] = "Out of memory allocating error description";
-#define SSL_ERR_LEN 128
+
+#define SSL_ERR_LEN 128
static char *
SSLerrmessage(void)
{
unsigned long errcode;
const char *errreason;
- char *errbuf;
+ char *errbuf;
errbuf = malloc(SSL_ERR_LEN);
if (!errbuf)
return ssl_nomem;
errcode = ERR_get_error();
- if (errcode == 0) {
+ if (errcode == 0)
+ {
strcpy(errbuf, "No SSL error reported");
return errbuf;
}
errreason = ERR_reason_error_string(errcode);
- if (errreason != NULL) {
- strncpy(errbuf, errreason, SSL_ERR_LEN-1);
- errbuf[SSL_ERR_LEN-1] = '\0';
+ if (errreason != NULL)
+ {
+ strncpy(errbuf, errreason, SSL_ERR_LEN - 1);
+ errbuf[SSL_ERR_LEN - 1] = '\0';
return errbuf;
}
snprintf(errbuf, SSL_ERR_LEN, "SSL error code %lu", errcode);
if (buf != ssl_nomem)
free(buf);
}
+
/*
* Return pointer to SSL object.
*/
return NULL;
return conn->ssl;
}
-
#endif /* USE_SSL */
pq_check_sigpipe_handler(void)
{
pthread_key_create(&pq_thread_in_send, NULL);
+
/*
- * Find current pipe handler and chain on to it.
+ * Find current pipe handler and chain on to it.
*/
pq_pipe_handler = pqsignalinquire(SIGPIPE);
pqsignal(SIGPIPE, sigpipe_handler_ignore_send);
sigpipe_handler_ignore_send(int signo)
{
/*
- * If we have gotten a SIGPIPE outside send(), chain or
- * exit if we are at the end of the chain.
- * Synchronous signals are delivered to the thread that
- * caused the signal.
+ * If we have gotten a SIGPIPE outside send(), chain or exit if we are
+ * at the end of the chain. Synchronous signals are delivered to the
+ * thread that caused the signal.
*/
if (!PQinSend())
{
- if (pq_pipe_handler == SIG_DFL) /* not set by application */
+ if (pq_pipe_handler == SIG_DFL) /* not set by application */
exit(128 + SIGPIPE); /* typical return value for SIG_DFL */
else
- (*pq_pipe_handler)(signo); /* call original handler */
+ (*pq_pipe_handler) (signo); /* call original handler */
}
}
#endif
#endif
-
+
/*
* Indicates whether the current thread is in send()
* For use by SIGPIPE signal handlers; they should
{
#ifdef ENABLE_THREAD_SAFETY
return (pthread_getspecific(pq_thread_in_send) /* has it been set? */ &&
- *(char *)pthread_getspecific(pq_thread_in_send) == 't') ? true : false;
+ *(char *) pthread_getspecific(pq_thread_in_send) == 't') ? true : false;
#else
+
/*
- * No threading: our code ignores SIGPIPE around send().
- * Therefore, we can't be in send() if we are checking
- * from a SIGPIPE signal handler.
+ * No threading: our code ignores SIGPIPE around send(). Therefore, we
+ * can't be in send() if we are checking from a SIGPIPE signal
+ * handler.
*/
- return false;
+ return false;
#endif
}
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.107 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-fe.h,v 1.108 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
extern const char *PQparameterStatus(const PGconn *conn,
const char *paramName);
extern int PQprotocolVersion(const PGconn *conn);
-extern int PQserverVersion(const PGconn *conn);
+extern int PQserverVersion(const PGconn *conn);
extern char *PQerrorMessage(const PGconn *conn);
extern int PQsocket(const PGconn *conn);
extern int PQbackendPID(const PGconn *conn);
void *arg);
/*
- * Used to set callback that prevents concurrent access to
- * non-thread safe functions that libpq needs.
- * The default implementation uses a libpq internal mutex.
- * Only required for multithreaded apps that use kerberos
- * both within their app and for postgresql connections.
+ * Used to set callback that prevents concurrent access to
+ * non-thread safe functions that libpq needs.
+ * The default implementation uses a libpq internal mutex.
+ * Only required for multithreaded apps that use kerberos
+ * both within their app and for postgresql connections.
*/
-typedef void (pgthreadlock_t)(int acquire);
+typedef void (pgthreadlock_t) (int acquire);
-extern pgthreadlock_t * PQregisterThreadLock(pgthreadlock_t *newhandler);
+extern pgthreadlock_t *PQregisterThreadLock(pgthreadlock_t *newhandler);
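A hedged example of the callback described above: a multithreaded application that also uses Kerberos on its own could register a handler like this so libpq serializes the non-thread-safe calls; the mutex and handler names are the application's own.

static pthread_mutex_t app_libpq_lock = PTHREAD_MUTEX_INITIALIZER;

static void
app_threadlock(int acquire)
{
	if (acquire)
		pthread_mutex_lock(&app_libpq_lock);
	else
		pthread_mutex_unlock(&app_libpq_lock);
}

/* during application startup: PQregisterThreadLock(app_threadlock); */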
extern void PQinitSSL(int do_init);
const int *paramFormats,
int resultFormat);
extern PGresult *PQexecPrepared(PGconn *conn,
- const char *stmtName,
- int nParams,
- const char *const * paramValues,
- const int *paramLengths,
- const int *paramFormats,
- int resultFormat);
+ const char *stmtName,
+ int nParams,
+ const char *const * paramValues,
+ const int *paramLengths,
+ const int *paramFormats,
+ int resultFormat);
/* Interface for multiple-result or asynchronous queries */
extern int PQsendQuery(PGconn *conn, const char *query);
const int *paramFormats,
int resultFormat);
extern int PQsendQueryPrepared(PGconn *conn,
- const char *stmtName,
- int nParams,
- const char *const * paramValues,
- const int *paramLengths,
- const int *paramFormats,
- int resultFormat);
+ const char *stmtName,
+ int nParams,
+ const char *const * paramValues,
+ const int *paramLengths,
+ const int *paramFormats,
+ int resultFormat);
extern PGresult *PQgetResult(PGconn *conn);
/* Routines for managing an asynchronous query */
/* === in fe-print.c === */
-extern void PQprint(FILE *fout, /* output stream */
- const PGresult *res,
- const PQprintOpt *ps); /* option structure */
+extern void
+PQprint(FILE *fout, /* output stream */
+ const PGresult *res,
+ const PQprintOpt *ps); /* option structure */
/*
* really old printing routines
*/
-extern void PQdisplayTuples(const PGresult *res,
+extern void
+PQdisplayTuples(const PGresult *res,
FILE *fp, /* where to send the output */
int fillAlign, /* pad the fields with spaces */
const char *fieldSep, /* field separator */
int printHeader, /* display headers? */
int quiet);
-extern void PQprintTuples(const PGresult *res,
+extern void
+PQprintTuples(const PGresult *res,
FILE *fout, /* output stream */
int printAttName, /* print attribute names */
int terseOutput, /* delimiter bars */
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.90 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/libpq-int.h,v 1.91 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef ENABLE_THREAD_SAFETY
extern pgthreadlock_t *g_threadlock;
+
#define pglock_thread() g_threadlock(true);
#define pgunlock_thread() g_threadlock(false);
#else
#define pglock_thread() ((void)0)
#define pgunlock_thread() ((void)0)
#endif
-
+
/* === in fe-exec.c === */
extern void pqsecure_close(PGconn *);
extern ssize_t pqsecure_read(PGconn *, void *ptr, size_t len);
extern ssize_t pqsecure_write(PGconn *, const void *ptr, size_t len);
+
#ifdef ENABLE_THREAD_SAFETY
extern void pq_check_sigpipe_handler(void);
extern pthread_key_t pq_thread_in_send;
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/interfaces/libpq/pqexpbuffer.c,v 1.18 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/pqexpbuffer.c,v 1.19 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/*
* Guard against ridiculous "needed" values, which can occur if we're
- * fed bogus data. Without this, we can get an overflow or infinite
+ * fed bogus data. Without this, we can get an overflow or infinite
* loop in the following.
*/
if (needed >= ((size_t) INT_MAX - str->len))
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/interfaces/libpq/pqsignal.c,v 1.22 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/interfaces/libpq/pqsignal.c,v 1.23 2004/08/29 05:07:00 momjian Exp $
*
* NOTES
* This shouldn't be in libpq, but the monitor and some other
{
#ifndef WIN32
#if !defined(HAVE_POSIX_SIGNALS)
- pqsigfunc old_sigfunc;
- int old_sigmask;
+ pqsigfunc old_sigfunc;
+ int old_sigmask;
/* Prevent signal handler calls during test */
old_sigmask = sigblock(sigmask(signo));
- old_sigfunc = signal(signo, SIG_DFL);
+ old_sigfunc = signal(signo, SIG_DFL);
signal(signo, old_sigfunc);
sigblock(old_sigmask);
return old_sigfunc;
struct sigaction oact;
if (sigaction(signo, NULL, &oact) < 0)
- return SIG_ERR;
+ return SIG_ERR;
return oact.sa_handler;
#endif /* !HAVE_POSIX_SIGNALS */
#else
/*-------------------------------------------------------------------------
*
* pthread-win32.c
-* partial pthread implementation for win32
+* partial pthread implementation for win32
*
* Copyright (c) 2004, PostgreSQL Global Development Group
* IDENTIFICATION
-* $PostgreSQL: pgsql/src/interfaces/libpq/pthread-win32.c,v 1.1 2004/06/19 04:22:17 momjian Exp $
+* $PostgreSQL: pgsql/src/interfaces/libpq/pthread-win32.c,v 1.2 2004/08/29 05:07:00 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include "windows.h"
#include "pthread.h"
-HANDLE pthread_self()
+HANDLE
+pthread_self()
{
- return GetCurrentThread();
+ return GetCurrentThread();
}
-void pthread_setspecific(pthread_key_t key, void *val)
+void
+pthread_setspecific(pthread_key_t key, void *val)
{
}
-void *pthread_getspecific(pthread_key_t key)
+void *
+pthread_getspecific(pthread_key_t key)
{
- return NULL;
+ return NULL;
}
-void pthread_mutex_init(pthread_mutex_t *mp, void *attr)
+void
+pthread_mutex_init(pthread_mutex_t *mp, void *attr)
{
- *mp = CreateMutex(0, 0, 0);
+ *mp = CreateMutex(0, 0, 0);
}
-void pthread_mutex_lock(pthread_mutex_t *mp)
+void
+pthread_mutex_lock(pthread_mutex_t *mp)
{
- WaitForSingleObject(*mp, INFINITE);
+ WaitForSingleObject(*mp, INFINITE);
}
-void pthread_mutex_unlock(pthread_mutex_t *mp)
+void
+pthread_mutex_unlock(pthread_mutex_t *mp)
{
- ReleaseMutex(*mp);
+ ReleaseMutex(*mp);
}
{
strerrbuf[buflen - 1] = '\0';
offs = strlen(strerrbuf);
- if (offs > (int)buflen - 64)
+ if (offs > (int) buflen - 64)
offs = buflen - 64;
sprintf(strerrbuf + offs, " (0x%08X/%i)", err, err);
}
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.48 2004/07/31 00:45:44 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plperl/plperl.c,v 1.49 2004/08/29 05:07:01 momjian Exp $
*
**********************************************************************/
CommandId fn_cmin;
bool lanpltrusted;
bool fn_retistuple; /* true, if function returns tuple */
- bool fn_retisset; /*true, if function returns set*/
+ bool fn_retisset; /* true, if function returns set */
Oid ret_oid; /* Oid of returning type */
FmgrInfo result_in_func;
Oid result_typioparam;
static bool plperl_safe_init_done = false;
static PerlInterpreter *plperl_interp = NULL;
static HV *plperl_proc_hash = NULL;
-static AV *g_row_keys = NULL;
-static AV *g_column_keys = NULL;
-static SV *srf_perlret=NULL; /*keep returned value*/
-static int g_attr_num = 0;
+static AV *g_row_keys = NULL;
+static AV *g_column_keys = NULL;
+static SV *srf_perlret = NULL; /* keep returned value */
+static int g_attr_num = 0;
/**********************************************************************
* Forward declarations
"", "-e",
/*
- * no commas between the next lines please. They are supposed to be
- * one string
+ * no commas between the next lines please. They are supposed to
+ * be one string
*/
"SPI::bootstrap(); use vars qw(%_SHARED);"
"sub ::mkunsafefunc {return eval(qq[ sub { $_[0] $_[1] } ]); }"
static void
plperl_safe_init(void)
{
- static char *safe_module =
- "require Safe; $Safe::VERSION";
+ static char *safe_module =
+ "require Safe; $Safe::VERSION";
- static char * safe_ok =
- "use vars qw($PLContainer); $PLContainer = new Safe('PLPerl');"
- "$PLContainer->permit_only(':default');$PLContainer->permit(':base_math');"
- "$PLContainer->share(qw[&elog &spi_exec_query &DEBUG &LOG &INFO &NOTICE &WARNING &ERROR %SHARED ]);"
- "sub ::mksafefunc { return $PLContainer->reval(qq[sub { $_[0] $_[1]}]); }"
- ;
+ static char *safe_ok =
+ "use vars qw($PLContainer); $PLContainer = new Safe('PLPerl');"
+ "$PLContainer->permit_only(':default');$PLContainer->permit(':base_math');"
+ "$PLContainer->share(qw[&elog &spi_exec_query &DEBUG &LOG &INFO &NOTICE &WARNING &ERROR %SHARED ]);"
+ "sub ::mksafefunc { return $PLContainer->reval(qq[sub { $_[0] $_[1]}]); }"
+ ;
- static char * safe_bad =
- "use vars qw($PLContainer); $PLContainer = new Safe('PLPerl');"
- "$PLContainer->permit_only(':default');$PLContainer->permit(':base_math');"
- "$PLContainer->share(qw[&elog &DEBUG &LOG &INFO &NOTICE &WARNING &ERROR %SHARED ]);"
- "sub ::mksafefunc { return $PLContainer->reval(qq[sub { "
- "elog(ERROR,'trusted perl functions disabled - please upgrade perl Safe module to at least 2.09');}]); }"
- ;
+ static char *safe_bad =
+ "use vars qw($PLContainer); $PLContainer = new Safe('PLPerl');"
+ "$PLContainer->permit_only(':default');$PLContainer->permit(':base_math');"
+ "$PLContainer->share(qw[&elog &DEBUG &LOG &INFO &NOTICE &WARNING &ERROR %SHARED ]);"
+ "sub ::mksafefunc { return $PLContainer->reval(qq[sub { "
+ "elog(ERROR,'trusted perl functions disabled - please upgrade perl Safe module to at least 2.09');}]); }"
+ ;
- SV * res;
+ SV *res;
- float safe_version;
+ float safe_version;
- res = eval_pv(safe_module,FALSE); /* TRUE = croak if failure */
+ res = eval_pv(safe_module, FALSE); /* TRUE = croak if failure */
safe_version = SvNV(res);
- eval_pv((safe_version < 2.09 ? safe_bad : safe_ok),FALSE);
+ eval_pv((safe_version < 2.09 ? safe_bad : safe_ok), FALSE);
plperl_safe_init_done = true;
}
/**********************************************************************
* extract a list of keys from a hash
**********************************************************************/
-static AV *
+static AV *
plperl_get_keys(HV * hv)
{
AV *ret;
elog(ERROR, "plperl: $_TD->{new} is not a hash");
plkeys = plperl_get_keys(hvNew);
- natts = av_len(plkeys)+1;
- if (natts != tupdesc->natts)
- elog(ERROR, "plperl: $_TD->{new} has an incorrect number of keys.");
+ natts = av_len(plkeys) + 1;
+ if (natts != tupdesc->natts)
+ elog(ERROR, "plperl: $_TD->{new} has an incorrect number of keys.");
modattrs = palloc0(natts * sizeof(int));
modvalues = palloc0(natts * sizeof(Datum));
modvalues[i] = FunctionCall3(&finfo,
CStringGetDatum(plval),
ObjectIdGetDatum(typelem),
- Int32GetDatum(tupdesc->attrs[atti]->atttypmod));
+ Int32GetDatum(tupdesc->attrs[atti]->atttypmod));
modnulls[i] = ' ';
}
else
SV *subref;
int count;
- if(trusted && !plperl_safe_init_done)
+ if (trusted && !plperl_safe_init_done)
plperl_safe_init();
ENTER;
tmp = DatumGetCString(FunctionCall3(&(desc->arg_out_func[i]),
fcinfo->arg[i],
- ObjectIdGetDatum(desc->arg_typioparam[i]),
+ ObjectIdGetDatum(desc->arg_typioparam[i]),
Int32GetDatum(-1)));
XPUSHs(sv_2mortal(newSVpv(tmp, 0)));
pfree(tmp);
/************************************************************
* Call the Perl function if not returning set
************************************************************/
- if (!prodesc->fn_retisset)
- perlret = plperl_call_perl_func(prodesc, fcinfo);
- else
+ if (!prodesc->fn_retisset)
+ perlret = plperl_call_perl_func(prodesc, fcinfo);
+ else
{
- if (SRF_IS_FIRSTCALL()) /*call function only once*/
+ if (SRF_IS_FIRSTCALL()) /* call function only once */
srf_perlret = plperl_call_perl_func(prodesc, fcinfo);
perlret = srf_perlret;
- }
+ }
- if (prodesc->fn_retisset && SRF_IS_FIRSTCALL())
- {
+ if (prodesc->fn_retisset && SRF_IS_FIRSTCALL())
+ {
if (prodesc->fn_retistuple)
g_column_keys = newAV();
if (SvTYPE(perlret) != SVt_RV)
- elog(ERROR, "plperl: set-returning function must return reference");
+ elog(ERROR, "plperl: set-returning function must return reference");
}
/************************************************************
if (SPI_finish() != SPI_OK_FINISH)
elog(ERROR, "SPI_finish() failed");
- if (!(perlret && SvOK(perlret) && SvTYPE(perlret)!=SVt_NULL ))
+ if (!(perlret && SvOK(perlret) && SvTYPE(perlret) != SVt_NULL))
{
/* return NULL if Perl code returned undef */
retval = (Datum) 0;
if (prodesc->fn_retistuple && perlret && SvTYPE(perlret) != SVt_RV)
elog(ERROR, "plperl: composite-returning function must return a reference");
- if (prodesc->fn_retistuple && fcinfo->resultinfo ) /* set of tuples */
+ if (prodesc->fn_retistuple && fcinfo->resultinfo) /* set of tuples */
{
/* SRF support */
HV *ret_hv;
AttInMetadata *attinmeta;
bool isset = 0;
char **values = NULL;
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
if (prodesc->fn_retisset && !rsinfo)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
- errmsg("returning a composite type is not allowed in this context"),
- errhint("This function is intended for use in the FROM clause.")));
+ errmsg("returning a composite type is not allowed in this context"),
+ errhint("This function is intended for use in the FROM clause.")));
isset = plperl_is_set(perlret);
values[i] = NULL;
}
}
- else
- {
+ else
+ {
int i;
values = (char **) palloc(g_attr_num * sizeof(char *));
SRF_RETURN_DONE(funcctx);
}
}
- else if (prodesc->fn_retisset) /* set of non-tuples */
+ else if (prodesc->fn_retisset) /* set of non-tuples */
{
- FuncCallContext *funcctx;
-
+ FuncCallContext *funcctx;
+
if (SRF_IS_FIRSTCALL())
{
MemoryContext oldcontext;
- int i;
+ int i;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
funcctx->max_calls = av_len((AV *) SvRV(perlret)) + 1;
}
-
+
funcctx = SRF_PERCALL_SETUP();
-
+
if (funcctx->call_cntr < funcctx->max_calls)
{
Datum result;
- AV* array;
- SV** svp;
- int i;
+ AV *array;
+ SV **svp;
+ int i;
- array = (AV*)SvRV(perlret);
+ array = (AV *) SvRV(perlret);
svp = av_fetch(array, funcctx->call_cntr, FALSE);
if (SvTYPE(*svp) != SVt_NULL)
result = FunctionCall3(&prodesc->result_in_func,
- PointerGetDatum(SvPV(*svp, PL_na)),
- ObjectIdGetDatum(prodesc->result_typioparam),
- Int32GetDatum(-1));
+ PointerGetDatum(SvPV(*svp, PL_na)),
+ ObjectIdGetDatum(prodesc->result_typioparam),
+ Int32GetDatum(-1));
else
{
fcinfo->isnull = true;
}
SRF_RETURN_NEXT(funcctx, result);
fcinfo->isnull = false;
- }
+ }
else
{
if (perlret)
SvREFCNT_dec(perlret);
SRF_RETURN_DONE(funcctx);
}
- }
- else if (!fcinfo->isnull) /* non-null singleton */
+ }
+ else if (!fcinfo->isnull) /* non-null singleton */
{
- if (prodesc->fn_retistuple) /* singleton perl hash to Datum */
+ if (prodesc->fn_retistuple) /* singleton perl hash to Datum */
{
- TupleDesc td = lookup_rowtype_tupdesc(prodesc->ret_oid,(int32)-1);
- HV * perlhash = (HV *) SvRV(perlret);
- int i;
- char **values;
- char * key, *val;
+ TupleDesc td = lookup_rowtype_tupdesc(prodesc->ret_oid, (int32) -1);
+ HV *perlhash = (HV *) SvRV(perlret);
+ int i;
+ char **values;
+ char *key,
+ *val;
AttInMetadata *attinmeta;
- HeapTuple tup;
+ HeapTuple tup;
if (!td)
ereport(ERROR,
for (i = 0; i < td->natts; i++)
{
- key = SPI_fname(td,i+1);
+ key = SPI_fname(td, i + 1);
val = plperl_get_elem(perlhash, key);
if (val)
values[i] = val;
attinmeta = TupleDescGetAttInMetadata(td);
tup = BuildTupleFromCStrings(attinmeta, values);
retval = HeapTupleGetDatum(tup);
-
+
}
- else /* perl string to Datum */
+ else
+/* perl string to Datum */
- retval = FunctionCall3(&prodesc->result_in_func,
- PointerGetDatum(SvPV(perlret, PL_na)),
- ObjectIdGetDatum(prodesc->result_typioparam),
- Int32GetDatum(-1));
+ retval = FunctionCall3(&prodesc->result_in_func,
+ PointerGetDatum(SvPV(perlret, PL_na)),
+ ObjectIdGetDatum(prodesc->result_typioparam),
+ Int32GetDatum(-1));
}
/************************************************************
* Call the Perl function
************************************************************/
+
/*
- * call perl trigger function and build TD hash
- */
+ * call perl trigger function and build TD hash
+ */
svTD = plperl_trigger_build_args(fcinfo);
perlret = plperl_call_perl_trigger_func(prodesc, fcinfo, svTD);
if (typeStruct->typtype == 'c' || procStruct->prorettype == RECORDOID)
{
prodesc->fn_retistuple = true;
- prodesc->ret_oid =
- procStruct->prorettype == RECORDOID ?
- typeStruct->typrelid :
+ prodesc->ret_oid =
+ procStruct->prorettype == RECORDOID ?
+ typeStruct->typrelid :
procStruct->prorettype;
}
************************************************************/
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
sv_catpvf(output, "'%s' => '%s',", attname, outputstr);
pfree(outputstr);
-/* ppport.h -- Perl/Pollution/Portability Version 2.011
+/* ppport.h -- Perl/Pollution/Portability Version 2.011
*
- * Automatically Created by Devel::PPPort on Sun Jul 4 09:11:52 2004
+ * Automatically Created by Devel::PPPort on Sun Jul 4 09:11:52 2004
*
* Do NOT edit this file directly! -- Edit PPPort.pm instead.
*
* Version 1.x, Copyright (C) 1999, Kenneth Albanowski.
* This code may be used and distributed under the same license as any
* version of Perl.
- *
+ *
* This version of ppport.h is designed to support operation with Perl
* installations back to 5.004, and has been tested up to 5.8.1.
*
*
* Include all following information:
*
- * 1. The complete output from running "perl -V"
+ * 1. The complete output from running "perl -V"
*
- * 2. This file.
+ * 2. This file.
*
- * 3. The name & version of the module you were trying to build.
+ * 3. The name & version of the module you were trying to build.
*
- * 4. A full log of the build that failed.
+ * 4. A full log of the build that failed.
*
- * 5. Any other information that you think could be relevant.
+ * 5. Any other information that you think could be relevant.
*
*
 * For the latest version of this code, please retrieve the Devel::PPPort
* module from CPAN.
- *
+ *
*/
/*
* for a static include, or use the GLOBAL request in a single module to
* produce a global definition that can be referenced from the other
* modules.
- *
- * Function: Static define: Extern define:
- * newCONSTSUB() NEED_newCONSTSUB NEED_newCONSTSUB_GLOBAL
+ *
+ * Function: Static define: Extern define:
+ * newCONSTSUB() NEED_newCONSTSUB NEED_newCONSTSUB_GLOBAL
*
*/
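/*
 * Editor's sketch (illustration only, not part of this patch): how a
 * hypothetical XS module would request the file-local newCONSTSUB()
 * fallback described above.  The module and constant names are invented.
 */
#include "EXTERN.h"
#include "perl.h"
#include "XSUB.h"

#define NEED_newCONSTSUB            /* ask ppport.h for a static definition */
#include "ppport.h"

static void
demo_install_constants(pTHX)        /* e.g. called from the module's BOOT: section */
{
    /* create a constant subroutine Demo::ANSWER that always returns 42 */
    newCONSTSUB(gv_stashpv("Demo", TRUE), "ANSWER", newSViv(42));
}

/*
 * A multi-file extension would instead define NEED_newCONSTSUB_GLOBAL in
 * exactly one translation unit, per the table above.
 */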
-
+
/* To verify whether ppport.h is needed for your module, and whether any
* special defines should be used, ppport.h can be run through Perl to check
* your source code. Simply say:
- *
- * perl -x ppport.h *.c *.h *.xs foo/bar*.c [etc]
- *
+ *
+ * perl -x ppport.h *.c *.h *.xs foo/bar*.c [etc]
+ *
* The result will be a list of patches suggesting changes that should at
* least be acceptable, if not necessarily the most efficient solution, or a
* fix for all possible problems. It won't catch where dTHR is needed, and
* doesn't attempt to account for global macro or function definitions,
* nested includes, typemaps, etc.
- *
+ *
* In order to test for the need of dTHR, please try your module under a
* recent version of Perl that has threading compiled-in.
*
- */
+ */
/*
$need_include = 1;
}
}
-
+
if (scalar(keys %add_func) or $need_include != $has_include) {
if (!$has_include) {
$inc = join('',map("#define NEED_$_\n", sort keys %add_func)).
- "#include \"ppport.h\"\n";
+ "#include \"ppport.h\"\n";
$c = "$inc$c" unless $c =~ s/#.*include.*XSUB.*\n/$&$inc/m;
} elsif (keys %add_func) {
$inc = join('',map("#define NEED_$_\n", sort keys %add_func));
}
$changes++;
}
-
+
if ($changes) {
open(OUT,">/tmp/ppport.h.$$");
print OUT $c;
#define _P_P_PORTABILITY_H_
#ifndef PERL_REVISION
-# ifndef __PATCHLEVEL_H_INCLUDED__
-# define PERL_PATCHLEVEL_H_IMPLICIT
-# endif
-# if !(defined(PERL_VERSION) || (defined(SUBVERSION) && defined(PATCHLEVEL)))
-# include <patchlevel.h>
-# endif
-# ifndef PERL_REVISION
-# define PERL_REVISION (5)
- /* Replace: 1 */
-# define PERL_VERSION PATCHLEVEL
-# define PERL_SUBVERSION SUBVERSION
- /* Replace PERL_PATCHLEVEL with PERL_VERSION */
- /* Replace: 0 */
-# endif
+#ifndef __PATCHLEVEL_H_INCLUDED__
+#define PERL_PATCHLEVEL_H_IMPLICIT
+#endif
+#if !(defined(PERL_VERSION) || (defined(SUBVERSION) && defined(PATCHLEVEL)))
+#include <patchlevel.h>
+#endif
+#ifndef PERL_REVISION
+#define PERL_REVISION (5)
+ /* Replace: 1 */
+#define PERL_VERSION PATCHLEVEL
+#define PERL_SUBVERSION SUBVERSION
+ /* Replace PERL_PATCHLEVEL with PERL_VERSION */
+ /* Replace: 0 */
+#endif
#endif
#define PERL_BCDVERSION ((PERL_REVISION * 0x1000000L) + (PERL_VERSION * 0x1000L) + PERL_SUBVERSION)
-/* It is very unlikely that anyone will try to use this with Perl 6
+/* It is very unlikely that anyone will try to use this with Perl 6
(or greater), but who knows.
*/
#if PERL_REVISION != 5
-# error ppport.h only works with Perl version 5
-#endif /* PERL_REVISION != 5 */
+#error ppport.h only works with Perl version 5
+#endif /* PERL_REVISION != 5 */
#ifndef ERRSV
-# define ERRSV perl_get_sv("@",FALSE)
+#define ERRSV perl_get_sv("@",FALSE)
#endif
#if (PERL_VERSION < 4) || ((PERL_VERSION == 4) && (PERL_SUBVERSION <= 5))
/* Replace: 1 */
-# define PL_Sv Sv
-# define PL_compiling compiling
-# define PL_copline copline
-# define PL_curcop curcop
-# define PL_curstash curstash
-# define PL_defgv defgv
-# define PL_dirty dirty
-# define PL_dowarn dowarn
-# define PL_hints hints
-# define PL_na na
-# define PL_perldb perldb
-# define PL_rsfp_filters rsfp_filters
-# define PL_rsfpv rsfp
-# define PL_stdingv stdingv
-# define PL_sv_no sv_no
-# define PL_sv_undef sv_undef
-# define PL_sv_yes sv_yes
+#define PL_Sv Sv
+#define PL_compiling compiling
+#define PL_copline copline
+#define PL_curcop curcop
+#define PL_curstash curstash
+#define PL_defgv defgv
+#define PL_dirty dirty
+#define PL_dowarn dowarn
+#define PL_hints hints
+#define PL_na na
+#define PL_perldb perldb
+#define PL_rsfp_filters rsfp_filters
+#define PL_rsfpv rsfp
+#define PL_stdingv stdingv
+#define PL_sv_no sv_no
+#define PL_sv_undef sv_undef
+#define PL_sv_yes sv_yes
/* Replace: 0 */
#endif
#ifdef HASATTRIBUTE
-# if (defined(__GNUC__) && defined(__cplusplus)) || defined(__INTEL_COMPILER)
-# define PERL_UNUSED_DECL
-# else
-# define PERL_UNUSED_DECL __attribute__((unused))
-# endif
+#if (defined(__GNUC__) && defined(__cplusplus)) || defined(__INTEL_COMPILER)
+#define PERL_UNUSED_DECL
+#else
+#define PERL_UNUSED_DECL __attribute__((unused))
+#endif
#else
-# define PERL_UNUSED_DECL
+#define PERL_UNUSED_DECL
#endif
#ifndef dNOOP
-# define NOOP (void)0
-# define dNOOP extern int Perl___notused PERL_UNUSED_DECL
+#define NOOP (void)0
+#define dNOOP extern int Perl___notused PERL_UNUSED_DECL
#endif
#ifndef dTHR
-# define dTHR dNOOP
+#define dTHR dNOOP
#endif
#ifndef dTHX
-# define dTHX dNOOP
-# define dTHXa(x) dNOOP
-# define dTHXoa(x) dNOOP
+#define dTHX dNOOP
+#define dTHXa(x) dNOOP
+#define dTHXoa(x) dNOOP
#endif
#ifndef pTHX
-# define pTHX void
-# define pTHX_
-# define aTHX
-# define aTHX_
-#endif
+#define pTHX void
+#define pTHX_
+#define aTHX
+#define aTHX_
+#endif
#ifndef dAX
-# define dAX I32 ax = MARK - PL_stack_base + 1
+#define dAX I32 ax = MARK - PL_stack_base + 1
#endif
#ifndef dITEMS
-# define dITEMS I32 items = SP - MARK
+#define dITEMS I32 items = SP - MARK
#endif
/* IV could also be a quad (say, a long long), but Perls
* capable of those should have IVSIZE already. */
#if !defined(IVSIZE) && defined(LONGSIZE)
-# define IVSIZE LONGSIZE
+#define IVSIZE LONGSIZE
#endif
#ifndef IVSIZE
-# define IVSIZE 4 /* A bold guess, but the best we can make. */
+#define IVSIZE 4 /* A bold guess, but the best we can make. */
#endif
#ifndef UVSIZE
-# define UVSIZE IVSIZE
+#define UVSIZE IVSIZE
#endif
#ifndef NVTYPE
-# if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE)
-# define NVTYPE long double
-# else
-# define NVTYPE double
-# endif
+#if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE)
+#define NVTYPE long double
+#else
+#define NVTYPE double
+#endif
typedef NVTYPE NV;
#endif
#ifndef INT2PTR
#if (IVSIZE == PTRSIZE) && (UVSIZE == PTRSIZE)
-# define PTRV UV
-# define INT2PTR(any,d) (any)(d)
+#define PTRV UV
+#define INT2PTR(any,d) (any)(d)
#else
-# if PTRSIZE == LONGSIZE
-# define PTRV unsigned long
-# else
-# define PTRV unsigned
-# endif
-# define INT2PTR(any,d) (any)(PTRV)(d)
-#endif
-#define NUM2PTR(any,d) (any)(PTRV)(d)
-#define PTR2IV(p) INT2PTR(IV,p)
-#define PTR2UV(p) INT2PTR(UV,p)
-#define PTR2NV(p) NUM2PTR(NV,p)
#if PTRSIZE == LONGSIZE
-# define PTR2ul(p) (unsigned long)(p)
+#define PTRV unsigned long
#else
-# define PTR2ul(p) INT2PTR(unsigned long,p)
+#define PTRV unsigned
#endif
-
-#endif /* !INT2PTR */
+#define INT2PTR(any,d) (any)(PTRV)(d)
+#endif
+#define NUM2PTR(any,d) (any)(PTRV)(d)
+#define PTR2IV(p) INT2PTR(IV,p)
+#define PTR2UV(p) INT2PTR(UV,p)
+#define PTR2NV(p) NUM2PTR(NV,p)
+#if PTRSIZE == LONGSIZE
+#define PTR2ul(p) (unsigned long)(p)
+#else
+#define PTR2ul(p) INT2PTR(unsigned long,p)
+#endif
+#endif /* !INT2PTR */
#ifndef boolSV
-# define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
+#define boolSV(b) ((b) ? &PL_sv_yes : &PL_sv_no)
#endif
#ifndef gv_stashpvn
-# define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
+#define gv_stashpvn(str,len,flags) gv_stashpv(str,flags)
#endif
#ifndef newSVpvn
-# define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
+#define newSVpvn(data,len) ((len) ? newSVpv ((data), (len)) : newSVpv ("", 0))
#endif
#ifndef newRV_inc
/* Replace: 1 */
-# define newRV_inc(sv) newRV(sv)
+#define newRV_inc(sv) newRV(sv)
/* Replace: 0 */
#endif
/* DEFSV appears first in 5.004_56 */
#ifndef DEFSV
-# define DEFSV GvSV(PL_defgv)
+#define DEFSV GvSV(PL_defgv)
#endif
#ifndef SAVE_DEFSV
-# define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
+#define SAVE_DEFSV SAVESPTR(GvSV(PL_defgv))
#endif
#ifndef newRV_noinc
-# ifdef __GNUC__
-# define newRV_noinc(sv) \
- ({ \
- SV *nsv = (SV*)newRV(sv); \
- SvREFCNT_dec(sv); \
- nsv; \
- })
-# else
-# if defined(USE_THREADS)
-static SV * newRV_noinc (SV * sv)
+#ifdef __GNUC__
+#define newRV_noinc(sv) \
+ ({ \
+ SV *nsv = (SV*)newRV(sv); \
+ SvREFCNT_dec(sv); \
+ nsv; \
+ })
+#else
+#if defined(USE_THREADS)
+static SV *
+newRV_noinc(SV * sv)
{
- SV *nsv = (SV*)newRV(sv);
- SvREFCNT_dec(sv);
- return nsv;
+ SV *nsv = (SV *) newRV(sv);
+
+ SvREFCNT_dec(sv);
+ return nsv;
}
-# else
-# define newRV_noinc(sv) \
- (PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
-# endif
-# endif
+
+#else
+#define newRV_noinc(sv) \
+ (PL_Sv=(SV*)newRV(sv), SvREFCNT_dec(sv), (SV*)PL_Sv)
+#endif
+#endif
#endif
/* Provide: newCONSTSUB */
#if defined(NEED_newCONSTSUB)
static
#else
-extern void newCONSTSUB(HV * stash, char * name, SV *sv);
+extern void newCONSTSUB(HV * stash, char *name, SV * sv);
#endif
#if defined(NEED_newCONSTSUB) || defined(NEED_newCONSTSUB_GLOBAL)
void
-newCONSTSUB(stash,name,sv)
-HV *stash;
-char *name;
-SV *sv;
+newCONSTSUB(stash, name, sv)
+HV *stash;
+char *name;
+SV *sv;
{
- U32 oldhints = PL_hints;
- HV *old_cop_stash = PL_curcop->cop_stash;
- HV *old_curstash = PL_curstash;
- line_t oldline = PL_curcop->cop_line;
+ U32 oldhints = PL_hints;
+ HV *old_cop_stash = PL_curcop->cop_stash;
+ HV *old_curstash = PL_curstash;
+ line_t oldline = PL_curcop->cop_line;
+
PL_curcop->cop_line = PL_copline;
PL_hints &= ~HINT_BLOCK_SCOPE;
newSUB(
#if (PERL_VERSION < 3) || ((PERL_VERSION == 3) && (PERL_SUBVERSION < 22))
- /* before 5.003_22 */
- start_subparse(),
+ /* before 5.003_22 */
+ start_subparse(),
#else
-# if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
- /* 5.003_22 */
- start_subparse(0),
-# else
- /* 5.003_23 onwards */
- start_subparse(FALSE, 0),
-# endif
+#if (PERL_VERSION == 3) && (PERL_SUBVERSION == 22)
+ /* 5.003_22 */
+ start_subparse(0),
+#else
+ /* 5.003_23 onwards */
+ start_subparse(FALSE, 0),
+#endif
#endif
- newSVOP(OP_CONST, 0, newSVpv(name,0)),
- newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == "" -- GMB */
- newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
- );
+ newSVOP(OP_CONST, 0, newSVpv(name, 0)),
+ newSVOP(OP_CONST, 0, &PL_sv_no), /* SvPV(&PL_sv_no) == ""
+ * -- GMB */
+ newSTATEOP(0, Nullch, newSVOP(OP_CONST, 0, sv))
+ );
PL_hints = oldhints;
PL_curcop->cop_stash = old_cop_stash;
PL_curcop->cop_line = oldline;
}
#endif
-
-#endif /* newCONSTSUB */
+#endif /* newCONSTSUB */
#ifndef START_MY_CXT
* Code that uses these macros is responsible for the following:
* 1. #define MY_CXT_KEY to a unique string, e.g. "DynaLoader_guts"
* 2. Declare a typedef named my_cxt_t that is a structure that contains
- * all the data that needs to be interpreter-local.
+ * all the data that needs to be interpreter-local.
* 3. Use the START_MY_CXT macro after the declaration of my_cxt_t.
* 4. Use the MY_CXT_INIT macro such that it is called exactly once
- * (typically put in the BOOT: section).
+ * (typically put in the BOOT: section).
* 5. Use the members of the my_cxt_t structure everywhere as
- * MY_CXT.member.
+ * MY_CXT.member.
* 6. Use the dMY_CXT macro (a declaration) in all the functions that
- * access MY_CXT.
+ * access MY_CXT.
*/
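/*
 * Editor's sketch (illustration only, not part of this patch): the six
 * steps above for a hypothetical extension "Demo" holding one
 * interpreter-local counter.
 */
#define MY_CXT_KEY "Demo::_guts"    /* 1. a unique key string */

typedef struct
{                                   /* 2. the interpreter-local data */
    int         call_count;
} my_cxt_t;

START_MY_CXT                        /* 3. immediately after the typedef */

/* 4. MY_CXT_INIT runs exactly once, typically from the BOOT: section */

static void
demo_count_call(pTHX)
{
    dMY_CXT;                        /* 6. declare access in each function ... */
    MY_CXT.call_count++;            /* 5. ... and refer to members as MY_CXT.member */
}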
#if defined(MULTIPLICITY) || defined(PERL_OBJECT) || \
- defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
+ defined(PERL_CAPI) || defined(PERL_IMPLICIT_CONTEXT)
/* This must appear in all extensions that define a my_cxt_t structure,
* right after the definition (i.e. at file scope). The non-threads
/* Fetches the SV that keeps the per-interpreter data. */
#define dMY_CXT_SV \
SV *my_cxt_sv = perl_get_sv(MY_CXT_KEY, FALSE)
-#else /* >= perl5.004_68 */
+#else /* >= perl5.004_68 */
#define dMY_CXT_SV \
SV *my_cxt_sv = *hv_fetch(PL_modglobal, MY_CXT_KEY, \
sizeof(MY_CXT_KEY)-1, TRUE)
-#endif /* < perl5.004_68 */
+#endif /* < perl5.004_68 */
/* This declaration should be used within all functions that use the
* interpreter-local data. */
-#define dMY_CXT \
+#define dMY_CXT \
dMY_CXT_SV; \
my_cxt_t *my_cxtp = INT2PTR(my_cxt_t*,SvUV(my_cxt_sv))
#define aMY_CXT_ aMY_CXT,
#define _aMY_CXT ,aMY_CXT
-#else /* single interpreter */
+#else /* single interpreter */
#define START_MY_CXT static my_cxt_t my_cxt;
#define dMY_CXT_SV dNOOP
#define dMY_CXT dNOOP
-#define MY_CXT_INIT NOOP
+#define MY_CXT_INIT NOOP
#define MY_CXT my_cxt
#define pMY_CXT void
#define aMY_CXT
#define aMY_CXT_
#define _aMY_CXT
-
-#endif
-
-#endif /* START_MY_CXT */
+#endif
+#endif /* START_MY_CXT */
#ifndef IVdf
-# if IVSIZE == LONGSIZE
-# define IVdf "ld"
-# define UVuf "lu"
-# define UVof "lo"
-# define UVxf "lx"
-# define UVXf "lX"
-# else
-# if IVSIZE == INTSIZE
-# define IVdf "d"
-# define UVuf "u"
-# define UVof "o"
-# define UVxf "x"
-# define UVXf "X"
-# endif
-# endif
+#if IVSIZE == LONGSIZE
+#define IVdf "ld"
+#define UVuf "lu"
+#define UVof "lo"
+#define UVxf "lx"
+#define UVXf "lX"
+#else
+#if IVSIZE == INTSIZE
+#define IVdf "d"
+#define UVuf "u"
+#define UVof "o"
+#define UVxf "x"
+#define UVXf "X"
+#endif
+#endif
#endif
#ifndef NVef
-# if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE) && \
- defined(PERL_PRIfldbl) /* Not very likely, but let's try anyway. */
-# define NVef PERL_PRIeldbl
-# define NVff PERL_PRIfldbl
-# define NVgf PERL_PRIgldbl
-# else
-# define NVef "e"
-# define NVff "f"
-# define NVgf "g"
-# endif
+#if defined(USE_LONG_DOUBLE) && defined(HAS_LONG_DOUBLE) && \
+ defined(PERL_PRIfldbl) /* Not very likely, but let's try anyway. */
+#define NVef PERL_PRIeldbl
+#define NVff PERL_PRIfldbl
+#define NVgf PERL_PRIgldbl
+#else
+#define NVef "e"
+#define NVff "f"
+#define NVgf "g"
+#endif
#endif
-#ifndef AvFILLp /* Older perls (<=5.003) lack AvFILLp */
-# define AvFILLp AvFILL
+#ifndef AvFILLp /* Older perls (<=5.003) lack AvFILLp */
+#define AvFILLp AvFILL
#endif
#ifdef SvPVbyte
-# if PERL_REVISION == 5 && PERL_VERSION < 7
- /* SvPVbyte does not work in perl-5.6.1, borrowed version for 5.7.3 */
-# undef SvPVbyte
-# define SvPVbyte(sv, lp) \
- ((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \
- ? ((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp))
- static char *
- my_sv_2pvbyte(pTHX_ register SV *sv, STRLEN *lp)
- {
- sv_utf8_downgrade(sv,0);
- return SvPV(sv,*lp);
- }
-# endif
+#if PERL_REVISION == 5 && PERL_VERSION < 7
+ /* SvPVbyte does not work in perl-5.6.1, borrowed version for 5.7.3 */
+#undef SvPVbyte
+#define SvPVbyte(sv, lp) \
+ ((SvFLAGS(sv) & (SVf_POK|SVf_UTF8)) == (SVf_POK) \
+ ? ((lp = SvCUR(sv)), SvPVX(sv)) : my_sv_2pvbyte(aTHX_ sv, &lp))
+static char *
+my_sv_2pvbyte(pTHX_ register SV * sv, STRLEN * lp)
+{
+ sv_utf8_downgrade(sv, 0);
+ return SvPV(sv, *lp);
+}
+#endif
#else
-# define SvPVbyte SvPV
+#define SvPVbyte SvPV
#endif
#ifndef SvPV_nolen
-# define SvPV_nolen(sv) \
- ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \
- ? SvPVX(sv) : sv_2pv_nolen(sv))
- static char *
- sv_2pv_nolen(pTHX_ register SV *sv)
- {
- STRLEN n_a;
- return sv_2pv(sv, &n_a);
- }
+#define SvPV_nolen(sv) \
+ ((SvFLAGS(sv) & (SVf_POK)) == SVf_POK \
+ ? SvPVX(sv) : sv_2pv_nolen(sv))
+static char *
+sv_2pv_nolen(pTHX_ register SV * sv)
+{
+ STRLEN n_a;
+
+ return sv_2pv(sv, &n_a);
+}
#endif
#ifndef get_cv
-# define get_cv(name,create) perl_get_cv(name,create)
+#define get_cv(name,create) perl_get_cv(name,create)
#endif
#ifndef get_sv
-# define get_sv(name,create) perl_get_sv(name,create)
+#define get_sv(name,create) perl_get_sv(name,create)
#endif
#ifndef get_av
-# define get_av(name,create) perl_get_av(name,create)
+#define get_av(name,create) perl_get_av(name,create)
#endif
#ifndef get_hv
-# define get_hv(name,create) perl_get_hv(name,create)
+#define get_hv(name,create) perl_get_hv(name,create)
#endif
#ifndef call_argv
-# define call_argv perl_call_argv
+#define call_argv perl_call_argv
#endif
#ifndef call_method
-# define call_method perl_call_method
+#define call_method perl_call_method
#endif
#ifndef call_pv
-# define call_pv perl_call_pv
+#define call_pv perl_call_pv
#endif
#ifndef call_sv
-# define call_sv perl_call_sv
+#define call_sv perl_call_sv
#endif
#ifndef eval_pv
-# define eval_pv perl_eval_pv
+#define eval_pv perl_eval_pv
#endif
#ifndef eval_sv
-# define eval_sv perl_eval_sv
+#define eval_sv perl_eval_sv
#endif
#ifndef PERL_SCAN_GREATER_THAN_UV_MAX
-# define PERL_SCAN_GREATER_THAN_UV_MAX 0x02
+#define PERL_SCAN_GREATER_THAN_UV_MAX 0x02
#endif
#ifndef PERL_SCAN_SILENT_ILLDIGIT
-# define PERL_SCAN_SILENT_ILLDIGIT 0x04
+#define PERL_SCAN_SILENT_ILLDIGIT 0x04
#endif
#ifndef PERL_SCAN_ALLOW_UNDERSCORES
-# define PERL_SCAN_ALLOW_UNDERSCORES 0x01
+#define PERL_SCAN_ALLOW_UNDERSCORES 0x01
#endif
#ifndef PERL_SCAN_DISALLOW_PREFIX
-# define PERL_SCAN_DISALLOW_PREFIX 0x02
+#define PERL_SCAN_DISALLOW_PREFIX 0x02
#endif
#if (PERL_VERSION > 6) || ((PERL_VERSION == 6) && (PERL_SUBVERSION >= 1))
#ifndef IN_LOCALE
-# define IN_LOCALE \
+#define IN_LOCALE \
(PL_curcop == &PL_compiling ? IN_LOCALE_COMPILETIME : IN_LOCALE_RUNTIME)
#endif
#ifndef IN_LOCALE_RUNTIME
-# define IN_LOCALE_RUNTIME (PL_curcop->op_private & HINT_LOCALE)
+#define IN_LOCALE_RUNTIME (PL_curcop->op_private & HINT_LOCALE)
#endif
#ifndef IN_LOCALE_COMPILETIME
-# define IN_LOCALE_COMPILETIME (PL_hints & HINT_LOCALE)
+#define IN_LOCALE_COMPILETIME (PL_hints & HINT_LOCALE)
#endif
#ifndef IS_NUMBER_IN_UV
-# define IS_NUMBER_IN_UV 0x01
-# define IS_NUMBER_GREATER_THAN_UV_MAX 0x02
-# define IS_NUMBER_NOT_INT 0x04
-# define IS_NUMBER_NEG 0x08
-# define IS_NUMBER_INFINITY 0x10
-# define IS_NUMBER_NAN 0x20
+#define IS_NUMBER_IN_UV 0x01
+#define IS_NUMBER_GREATER_THAN_UV_MAX 0x02
+#define IS_NUMBER_NOT_INT 0x04
+#define IS_NUMBER_NEG 0x08
+#define IS_NUMBER_INFINITY 0x10
+#define IS_NUMBER_NAN 0x20
#endif
#ifndef PERL_MAGIC_sv
-# define PERL_MAGIC_sv '\0'
+#define PERL_MAGIC_sv '\0'
#endif
#ifndef PERL_MAGIC_overload
-# define PERL_MAGIC_overload 'A'
+#define PERL_MAGIC_overload 'A'
#endif
#ifndef PERL_MAGIC_overload_elem
-# define PERL_MAGIC_overload_elem 'a'
+#define PERL_MAGIC_overload_elem 'a'
#endif
#ifndef PERL_MAGIC_overload_table
-# define PERL_MAGIC_overload_table 'c'
+#define PERL_MAGIC_overload_table 'c'
#endif
#ifndef PERL_MAGIC_bm
-# define PERL_MAGIC_bm 'B'
+#define PERL_MAGIC_bm 'B'
#endif
#ifndef PERL_MAGIC_regdata
-# define PERL_MAGIC_regdata 'D'
+#define PERL_MAGIC_regdata 'D'
#endif
#ifndef PERL_MAGIC_regdatum
-# define PERL_MAGIC_regdatum 'd'
+#define PERL_MAGIC_regdatum 'd'
#endif
#ifndef PERL_MAGIC_env
-# define PERL_MAGIC_env 'E'
+#define PERL_MAGIC_env 'E'
#endif
#ifndef PERL_MAGIC_envelem
-# define PERL_MAGIC_envelem 'e'
+#define PERL_MAGIC_envelem 'e'
#endif
#ifndef PERL_MAGIC_fm
-# define PERL_MAGIC_fm 'f'
+#define PERL_MAGIC_fm 'f'
#endif
#ifndef PERL_MAGIC_regex_global
-# define PERL_MAGIC_regex_global 'g'
+#define PERL_MAGIC_regex_global 'g'
#endif
#ifndef PERL_MAGIC_isa
-# define PERL_MAGIC_isa 'I'
+#define PERL_MAGIC_isa 'I'
#endif
#ifndef PERL_MAGIC_isaelem
-# define PERL_MAGIC_isaelem 'i'
+#define PERL_MAGIC_isaelem 'i'
#endif
#ifndef PERL_MAGIC_nkeys
-# define PERL_MAGIC_nkeys 'k'
+#define PERL_MAGIC_nkeys 'k'
#endif
#ifndef PERL_MAGIC_dbfile
-# define PERL_MAGIC_dbfile 'L'
+#define PERL_MAGIC_dbfile 'L'
#endif
#ifndef PERL_MAGIC_dbline
-# define PERL_MAGIC_dbline 'l'
+#define PERL_MAGIC_dbline 'l'
#endif
#ifndef PERL_MAGIC_mutex
-# define PERL_MAGIC_mutex 'm'
+#define PERL_MAGIC_mutex 'm'
#endif
#ifndef PERL_MAGIC_shared
-# define PERL_MAGIC_shared 'N'
+#define PERL_MAGIC_shared 'N'
#endif
#ifndef PERL_MAGIC_shared_scalar
-# define PERL_MAGIC_shared_scalar 'n'
+#define PERL_MAGIC_shared_scalar 'n'
#endif
#ifndef PERL_MAGIC_collxfrm
-# define PERL_MAGIC_collxfrm 'o'
+#define PERL_MAGIC_collxfrm 'o'
#endif
#ifndef PERL_MAGIC_tied
-# define PERL_MAGIC_tied 'P'
+#define PERL_MAGIC_tied 'P'
#endif
#ifndef PERL_MAGIC_tiedelem
-# define PERL_MAGIC_tiedelem 'p'
+#define PERL_MAGIC_tiedelem 'p'
#endif
#ifndef PERL_MAGIC_tiedscalar
-# define PERL_MAGIC_tiedscalar 'q'
+#define PERL_MAGIC_tiedscalar 'q'
#endif
#ifndef PERL_MAGIC_qr
-# define PERL_MAGIC_qr 'r'
+#define PERL_MAGIC_qr 'r'
#endif
#ifndef PERL_MAGIC_sig
-# define PERL_MAGIC_sig 'S'
+#define PERL_MAGIC_sig 'S'
#endif
#ifndef PERL_MAGIC_sigelem
-# define PERL_MAGIC_sigelem 's'
+#define PERL_MAGIC_sigelem 's'
#endif
#ifndef PERL_MAGIC_taint
-# define PERL_MAGIC_taint 't'
+#define PERL_MAGIC_taint 't'
#endif
#ifndef PERL_MAGIC_uvar
-# define PERL_MAGIC_uvar 'U'
+#define PERL_MAGIC_uvar 'U'
#endif
#ifndef PERL_MAGIC_uvar_elem
-# define PERL_MAGIC_uvar_elem 'u'
+#define PERL_MAGIC_uvar_elem 'u'
#endif
#ifndef PERL_MAGIC_vstring
-# define PERL_MAGIC_vstring 'V'
+#define PERL_MAGIC_vstring 'V'
#endif
#ifndef PERL_MAGIC_vec
-# define PERL_MAGIC_vec 'v'
+#define PERL_MAGIC_vec 'v'
#endif
#ifndef PERL_MAGIC_utf8
-# define PERL_MAGIC_utf8 'w'
+#define PERL_MAGIC_utf8 'w'
#endif
#ifndef PERL_MAGIC_substr
-# define PERL_MAGIC_substr 'x'
+#define PERL_MAGIC_substr 'x'
#endif
#ifndef PERL_MAGIC_defelem
-# define PERL_MAGIC_defelem 'y'
+#define PERL_MAGIC_defelem 'y'
#endif
#ifndef PERL_MAGIC_glob
-# define PERL_MAGIC_glob '*'
+#define PERL_MAGIC_glob '*'
#endif
#ifndef PERL_MAGIC_arylen
-# define PERL_MAGIC_arylen '#'
+#define PERL_MAGIC_arylen '#'
#endif
#ifndef PERL_MAGIC_pos
-# define PERL_MAGIC_pos '.'
+#define PERL_MAGIC_pos '.'
#endif
#ifndef PERL_MAGIC_backref
-# define PERL_MAGIC_backref '<'
+#define PERL_MAGIC_backref '<'
#endif
#ifndef PERL_MAGIC_ext
-# define PERL_MAGIC_ext '~'
+#define PERL_MAGIC_ext '~'
#endif
-
-#endif /* _P_P_PORTABILITY_H_ */
+#endif /* _P_P_PORTABILITY_H_ */
/* End of File ppport.h */
#include "spi_internal.h"
-static HV* plperl_spi_execute_fetch_result(SPITupleTable*, int, int );
+static HV *plperl_spi_execute_fetch_result(SPITupleTable *, int, int);
int
return ERROR;
}
-HV*
-plperl_spi_exec(char* query, int limit)
+HV *
+plperl_spi_exec(char *query, int limit)
{
- HV *ret_hv;
- int spi_rv;
+ HV *ret_hv;
+ int spi_rv;
spi_rv = SPI_exec(query, limit);
- ret_hv=plperl_spi_execute_fetch_result(SPI_tuptable, SPI_processed, spi_rv);
+ ret_hv = plperl_spi_execute_fetch_result(SPI_tuptable, SPI_processed, spi_rv);
return ret_hv;
}
-static HV*
+static HV *
plperl_hash_from_tuple(HeapTuple tuple, TupleDesc tupdesc)
{
- int i;
- char *attname;
- char *attdata;
+ int i;
+ char *attname;
+ char *attdata;
- HV *array;
+ HV *array;
array = newHV();
- for (i = 0; i < tupdesc->natts; i++) {
+ for (i = 0; i < tupdesc->natts; i++)
+ {
/************************************************************
* Get the attribute name
************************************************************/
/************************************************************
* Get the attributes value
************************************************************/
- attdata = SPI_getvalue(tuple, tupdesc, i+1);
- if(attdata)
- hv_store(array, attname, strlen(attname), newSVpv(attdata,0), 0);
+ attdata = SPI_getvalue(tuple, tupdesc, i + 1);
+ if (attdata)
+ hv_store(array, attname, strlen(attname), newSVpv(attdata, 0), 0);
else
- hv_store(array, attname, strlen(attname), newSVpv("undef",0), 0);
+ hv_store(array, attname, strlen(attname), newSVpv("undef", 0), 0);
}
return array;
}
-static HV*
+static HV *
plperl_spi_execute_fetch_result(SPITupleTable *tuptable, int processed, int status)
{
- HV *result;
+ HV *result;
result = newHV();
hv_store(result, "status", strlen("status"),
- newSVpv((char*)SPI_result_code_string(status),0), 0);
+ newSVpv((char *) SPI_result_code_string(status), 0), 0);
hv_store(result, "processed", strlen("processed"),
newSViv(processed), 0);
{
if (processed)
{
- AV *rows;
- HV *row;
- int i;
+ AV *rows;
+ HV *row;
+ int i;
rows = newAV();
for (i = 0; i < processed; i++)
{
row = plperl_hash_from_tuple(tuptable->vals[i], tuptable->tupdesc);
- av_store(rows, i, newRV_noinc((SV*)row));
+ av_store(rows, i, newRV_noinc((SV *) row));
}
hv_store(result, "rows", strlen("rows"),
- newRV_noinc((SV*)rows), 0);
+ newRV_noinc((SV *) rows), 0);
}
}
int spi_ERROR(void);
-HV* plperl_spi_exec(char*, int);
-
-
+HV *plperl_spi_exec(char *, int);
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.79 2004/08/20 22:00:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_comp.c,v 1.80 2004/08/29 05:07:01 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
* Lookup table for EXCEPTION condition names
* ----------
*/
-typedef struct {
+typedef struct
+{
const char *label;
int sqlerrstate;
-} ExceptionLabelMap;
+} ExceptionLabelMap;
static const ExceptionLabelMap exception_label_map[] = {
#include "plerrcodes.h"
- { NULL, 0 }
+ {NULL, 0}
};
*/
static PLpgSQL_function *do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator);
static void plpgsql_compile_error_callback(void *arg);
static char **fetchArgNames(HeapTuple procTup, int nargs);
static PLpgSQL_type *build_datatype(HeapTuple typeTup, int32 typmod);
static void compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator);
-static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key);
-static void plpgsql_HashTableInsert(PLpgSQL_function *function,
- PLpgSQL_func_hashkey *func_key);
-static void plpgsql_HashTableDelete(PLpgSQL_function *function);
+static PLpgSQL_function *plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key);
+static void plpgsql_HashTableInsert(PLpgSQL_function * function,
+ PLpgSQL_func_hashkey * func_key);
+static void plpgsql_HashTableDelete(PLpgSQL_function * function);
/*
* This routine is a crock, and so is everyplace that calls it. The problem
static PLpgSQL_function *
do_compile(FunctionCallInfo fcinfo,
HeapTuple procTup,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator)
{
Form_pg_proc procStruct = (Form_pg_proc) GETSTRUCT(procTup);
procStruct->prorettype == ANYELEMENTOID)
{
(void) plpgsql_build_variable(strdup("$0"), 0,
- build_datatype(typeTup, -1),
+ build_datatype(typeTup, -1),
true);
}
}
argdtype->ttype != PLPGSQL_TTYPE_ROW)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("plpgsql functions cannot take type %s",
- format_type_be(argtypeid))));
+ errmsg("plpgsql functions cannot take type %s",
+ format_type_be(argtypeid))));
/* Build variable and add to datum list */
argvariable = plpgsql_build_variable(strdup(buf), 0,
* Add the variable tg_name
*/
var = plpgsql_build_variable(strdup("tg_name"), 0,
- plpgsql_build_datatype(NAMEOID, -1),
+ plpgsql_build_datatype(NAMEOID, -1),
true);
function->tg_name_varno = var->dno;
* Add the variable tg_when
*/
var = plpgsql_build_variable(strdup("tg_when"), 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_when_varno = var->dno;
* Add the variable tg_level
*/
var = plpgsql_build_variable(strdup("tg_level"), 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_level_varno = var->dno;
* Add the variable tg_op
*/
var = plpgsql_build_variable(strdup("tg_op"), 0,
- plpgsql_build_datatype(TEXTOID, -1),
+ plpgsql_build_datatype(TEXTOID, -1),
true);
function->tg_op_varno = var->dno;
* Add the variable tg_relid
*/
var = plpgsql_build_variable(strdup("tg_relid"), 0,
- plpgsql_build_datatype(OIDOID, -1),
+ plpgsql_build_datatype(OIDOID, -1),
true);
function->tg_relid_varno = var->dno;
* Add the variable tg_relname
*/
var = plpgsql_build_variable(strdup("tg_relname"), 0,
- plpgsql_build_datatype(NAMEOID, -1),
+ plpgsql_build_datatype(NAMEOID, -1),
true);
function->tg_relname_varno = var->dno;
* Add the variable tg_nargs
*/
var = plpgsql_build_variable(strdup("tg_nargs"), 0,
- plpgsql_build_datatype(INT4OID, -1),
+ plpgsql_build_datatype(INT4OID, -1),
true);
function->tg_nargs_varno = var->dno;
*/
if (function_parse_error_transpose((const char *) arg))
return;
+
/*
- * Done if a syntax error position was reported; otherwise we
- * have to fall back to a "near line N" report.
+ * Done if a syntax error position was reported; otherwise we have
+ * to fall back to a "near line N" report.
*/
}
result = (char **) palloc(sizeof(char *) * nargs);
- for (i=0; i < nargs; i++)
+ for (i = 0; i < nargs; i++)
result[i] = DatumGetCString(DirectFunctionCall1(textout, elems[i]));
return result;
plpgsql_yylval.dtype = ((PLpgSQL_var *) (plpgsql_Datums[nse->itemno]))->datatype;
return T_DTYPE;
- /* XXX perhaps allow REC here? */
+ /* XXX perhaps allow REC here? */
default:
return T_ERROR;
* to the current datum array, and optionally to the current namespace.
*/
PLpgSQL_variable *
-plpgsql_build_variable(char *refname, int lineno, PLpgSQL_type *dtype,
- bool add2namespace)
+plpgsql_build_variable(char *refname, int lineno, PLpgSQL_type * dtype,
+ bool add2namespace)
{
PLpgSQL_variable *result;
switch (dtype->ttype)
{
case PLPGSQL_TTYPE_SCALAR:
- {
- /* Ordinary scalar datatype */
- PLpgSQL_var *var;
-
- var = malloc(sizeof(PLpgSQL_var));
- memset(var, 0, sizeof(PLpgSQL_var));
-
- var->dtype = PLPGSQL_DTYPE_VAR;
- var->refname = refname;
- var->lineno = lineno;
- var->datatype = dtype;
- /* other fields might be filled by caller */
-
- /* preset to NULL */
- var->value = 0;
- var->isnull = true;
- var->freeval = false;
-
- plpgsql_adddatum((PLpgSQL_datum *) var);
- if (add2namespace)
- plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR,
- var->varno,
- refname);
- result = (PLpgSQL_variable *) var;
- break;
- }
+ {
+ /* Ordinary scalar datatype */
+ PLpgSQL_var *var;
+
+ var = malloc(sizeof(PLpgSQL_var));
+ memset(var, 0, sizeof(PLpgSQL_var));
+
+ var->dtype = PLPGSQL_DTYPE_VAR;
+ var->refname = refname;
+ var->lineno = lineno;
+ var->datatype = dtype;
+ /* other fields might be filled by caller */
+
+ /* preset to NULL */
+ var->value = 0;
+ var->isnull = true;
+ var->freeval = false;
+
+ plpgsql_adddatum((PLpgSQL_datum *) var);
+ if (add2namespace)
+ plpgsql_ns_additem(PLPGSQL_NSTYPE_VAR,
+ var->varno,
+ refname);
+ result = (PLpgSQL_variable *) var;
+ break;
+ }
case PLPGSQL_TTYPE_ROW:
- {
- /* Composite type -- build a row variable */
- PLpgSQL_row *row;
+ {
+ /* Composite type -- build a row variable */
+ PLpgSQL_row *row;
- row = build_row_var(dtype->typrelid);
+ row = build_row_var(dtype->typrelid);
- row->dtype = PLPGSQL_DTYPE_ROW;
- row->refname = refname;
- row->lineno = lineno;
+ row->dtype = PLPGSQL_DTYPE_ROW;
+ row->refname = refname;
+ row->lineno = lineno;
- plpgsql_adddatum((PLpgSQL_datum *) row);
- if (add2namespace)
- plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW,
- row->rowno,
- refname);
- result = (PLpgSQL_variable *) row;
- break;
- }
+ plpgsql_adddatum((PLpgSQL_datum *) row);
+ if (add2namespace)
+ plpgsql_ns_additem(PLPGSQL_NSTYPE_ROW,
+ row->rowno,
+ refname);
+ result = (PLpgSQL_variable *) row;
+ break;
+ }
case PLPGSQL_TTYPE_REC:
- {
- /* "record" type -- build a variable-contents record variable */
- PLpgSQL_rec *rec;
+ {
+ /*
+ * "record" type -- build a variable-contents record
+ * variable
+ */
+ PLpgSQL_rec *rec;
- rec = malloc(sizeof(PLpgSQL_rec));
- memset(rec, 0, sizeof(PLpgSQL_rec));
+ rec = malloc(sizeof(PLpgSQL_rec));
+ memset(rec, 0, sizeof(PLpgSQL_rec));
- rec->dtype = PLPGSQL_DTYPE_REC;
- rec->refname = refname;
- rec->lineno = lineno;
+ rec->dtype = PLPGSQL_DTYPE_REC;
+ rec->refname = refname;
+ rec->lineno = lineno;
- plpgsql_adddatum((PLpgSQL_datum *) rec);
- if (add2namespace)
- plpgsql_ns_additem(PLPGSQL_NSTYPE_REC,
- rec->recno,
- refname);
- result = (PLpgSQL_variable *) rec;
- break;
- }
+ plpgsql_adddatum((PLpgSQL_datum *) rec);
+ if (add2namespace)
+ plpgsql_ns_additem(PLPGSQL_NSTYPE_REC,
+ rec->recno,
+ refname);
+ result = (PLpgSQL_variable *) rec;
+ break;
+ }
case PLPGSQL_TTYPE_PSEUDO:
- {
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("variable \"%s\" has pseudo-type %s",
- refname, format_type_be(dtype->typoid))));
- result = NULL; /* keep compiler quiet */
- break;
- }
+ {
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("variable \"%s\" has pseudo-type %s",
+ refname, format_type_be(dtype->typoid))));
+ result = NULL; /* keep compiler quiet */
+ break;
+ }
default:
elog(ERROR, "unrecognized ttype: %d", dtype->ttype);
result = NULL; /* keep compiler quiet */
if (!attrStruct->attisdropped)
{
const char *attname;
- char *refname;
+ char *refname;
PLpgSQL_variable *var;
attname = NameStr(attrStruct->attname);
*
* We know if the table definitions contain a default value or if
* the field is declared in the table as NOT NULL. But it's
- * possible to create a table field as NOT NULL without a default
- * value and that would lead to problems later when initializing
- * the variables due to entering a block at execution time. Thus
- * we ignore this information for now.
+ * possible to create a table field as NOT NULL without a
+ * default value and that would lead to problems later when
+ * initializing the variables due to entering a block at
+ * execution time. Thus we ignore this information for now.
*/
var = plpgsql_build_variable(refname, 0,
- plpgsql_build_datatype(attrStruct->atttypid,
- attrStruct->atttypmod),
+ plpgsql_build_datatype(attrStruct->atttypid,
+ attrStruct->atttypmod),
false);
/*
plpgsql_parse_err_condition(char *condname)
{
int i;
- PLpgSQL_condition *new;
- PLpgSQL_condition *prev;
+ PLpgSQL_condition *new;
+ PLpgSQL_condition *prev;
/*
- * XXX Eventually we will want to look for user-defined exception names
- * here.
+ * XXX Eventually we will want to look for user-defined exception
+ * names here.
*/
/*
- * OTHERS is represented as code 0 (which would map to '00000', but
- * we have no need to represent that as an exception condition).
+ * OTHERS is represented as code 0 (which would map to '00000', but we
+ * have no need to represent that as an exception condition).
*/
if (strcmp(condname, "others") == 0)
{
static void
compute_function_hashkey(FunctionCallInfo fcinfo,
Form_pg_proc procStruct,
- PLpgSQL_func_hashkey *hashkey,
+ PLpgSQL_func_hashkey * hashkey,
bool forValidator)
{
int i;
hashkey->funcOid = fcinfo->flinfo->fn_oid;
/*
- * if trigger, get relation OID. In validation mode we do not know what
- * relation is intended to be used, so we leave trigrelOid zero; the
- * hash entry built in this case will never really be used.
+ * if trigger, get relation OID. In validation mode we do not know
+ * what relation is intended to be used, so we leave trigrelOid zero;
+ * the hash entry built in this case will never really be used.
*/
if (CALLED_AS_TRIGGER(fcinfo) && !forValidator)
{
}
static PLpgSQL_function *
-plpgsql_HashTableLookup(PLpgSQL_func_hashkey *func_key)
+plpgsql_HashTableLookup(PLpgSQL_func_hashkey * func_key)
{
plpgsql_HashEnt *hentry;
}
static void
-plpgsql_HashTableInsert(PLpgSQL_function *function,
- PLpgSQL_func_hashkey *func_key)
+plpgsql_HashTableInsert(PLpgSQL_function * function,
+ PLpgSQL_func_hashkey * func_key)
{
plpgsql_HashEnt *hentry;
bool found;
}
static void
-plpgsql_HashTableDelete(PLpgSQL_function *function)
+plpgsql_HashTableDelete(PLpgSQL_function * function)
{
plpgsql_HashEnt *hentry;
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.116 2004/08/20 22:00:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_exec.c,v 1.117 2004/08/29 05:07:01 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
* All plpgsql function executions within a single transaction share
* the same executor EState for evaluating "simple" expressions. Each
* function call creates its own "eval_econtext" ExprContext within this
- * estate. We destroy the estate at transaction shutdown to ensure there
+ * estate. We destroy the estate at transaction shutdown to ensure there
* is no permanent leakage of memory (especially for xact abort case).
*
* If a simple PLpgSQL_expr has been used in the current xact, it is
Datum *value,
bool *isnull);
static int exec_eval_integer(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
- bool *isNull);
+ PLpgSQL_expr * expr,
+ bool *isNull);
static bool exec_eval_boolean(PLpgSQL_execstate * estate,
- PLpgSQL_expr * expr,
- bool *isNull);
+ PLpgSQL_expr * expr,
+ bool *isNull);
static Datum exec_eval_expr(PLpgSQL_execstate * estate,
PLpgSQL_expr * expr,
bool *isNull,
PLpgSQL_row * row,
HeapTuple tup, TupleDesc tupdesc);
static HeapTuple make_tuple_from_row(PLpgSQL_execstate * estate,
- PLpgSQL_row * row,
- TupleDesc tupdesc);
+ PLpgSQL_row * row,
+ TupleDesc tupdesc);
static char *convert_value_to_string(Datum value, Oid valtype);
static Datum exec_cast_value(Datum value, Oid valtype,
Oid reqtype,
static bool
-exception_matches_conditions(ErrorData *edata, PLpgSQL_condition *cond)
+exception_matches_conditions(ErrorData *edata, PLpgSQL_condition * cond)
{
for (; cond != NULL; cond = cond->next)
{
int sqlerrstate = cond->sqlerrstate;
/*
- * OTHERS matches everything *except* query-canceled;
- * if you're foolish enough, you can match that explicitly.
+ * OTHERS matches everything *except* query-canceled; if you're
+ * foolish enough, you can match that explicitly.
*/
if (sqlerrstate == 0)
{
if (block->exceptions)
{
/*
- * Execute the statements in the block's body inside a sub-transaction
+ * Execute the statements in the block's body inside a
+ * sub-transaction
*/
- MemoryContext oldcontext = CurrentMemoryContext;
- volatile bool caught = false;
- int xrc;
+ MemoryContext oldcontext = CurrentMemoryContext;
+ volatile bool caught = false;
+ int xrc;
/*
* Start a subtransaction, and re-connect to SPI within it
SPI_result_code_string(xrc));
PG_TRY();
- {
rc = exec_stmts(estate, block->body);
- }
PG_CATCH();
{
- ErrorData *edata;
+ ErrorData *edata;
PLpgSQL_exceptions *exceptions;
int j;
{
PLpgSQL_row *row = (PLpgSQL_row *) (estate->datums[stmt->retrowno]);
- if (row->rowtupdesc) /* should always be true here */
+ if (row->rowtupdesc) /* should always be true here */
{
estate->retval = (Datum) make_tuple_from_row(estate, row,
- row->rowtupdesc);
- if (estate->retval == (Datum) NULL) /* should not happen */
+ row->rowtupdesc);
+ if (estate->retval == (Datum) NULL) /* should not happen */
elog(ERROR, "row not compatible with its own tupdesc");
estate->rettupdesc = row->rowtupdesc;
estate->retisnull = false;
if (tuple == NULL)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("wrong record type supplied in RETURN NEXT")));
+ errmsg("wrong record type supplied in RETURN NEXT")));
free_tuple = true;
}
else if (stmt->expr)
estate->err_text = raise_skip_msg; /* suppress traceback of raise */
ereport(stmt->elog_level,
- ((stmt->elog_level >= ERROR) ? errcode(ERRCODE_RAISE_EXCEPTION) : 0,
- errmsg_internal("%s", plpgsql_dstring_get(&ds))));
+ ((stmt->elog_level >= ERROR) ? errcode(ERRCODE_RAISE_EXCEPTION) : 0,
+ errmsg_internal("%s", plpgsql_dstring_get(&ds))));
estate->err_text = NULL; /* un-suppress... */
case SPI_ERROR_COPY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot COPY to/from client in PL/pgSQL")));
+ errmsg("cannot COPY to/from client in PL/pgSQL")));
case SPI_ERROR_CURSOR:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot manipulate cursors directly in PL/pgSQL"),
- errhint("Use PL/pgSQL's cursor features instead.")));
+ errmsg("cannot manipulate cursors directly in PL/pgSQL"),
+ errhint("Use PL/pgSQL's cursor features instead.")));
case SPI_ERROR_TRANSACTION:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot begin/end transactions in PL/pgSQL"),
+ errmsg("cannot begin/end transactions in PL/pgSQL"),
errhint("Use a BEGIN block with an EXCEPTION clause instead.")));
default:
elog(ERROR, "SPI_prepare failed for \"%s\": %s",
break;
}
- /* Some SPI errors deserve specific error messages */
+ /* Some SPI errors deserve specific error messages */
case SPI_ERROR_COPY:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
case SPI_ERROR_CURSOR:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("cannot manipulate cursors directly in PL/pgSQL"),
+ errmsg("cannot manipulate cursors directly in PL/pgSQL"),
errhint("Use PL/pgSQL's cursor features instead.")));
case SPI_ERROR_TRANSACTION:
ereport(ERROR,
switch (target->dtype)
{
case PLPGSQL_DTYPE_VAR:
- {
- /*
- * Target is a variable
- */
- PLpgSQL_var *var = (PLpgSQL_var *) target;
- Datum newvalue;
+ {
+ /*
+ * Target is a variable
+ */
+ PLpgSQL_var *var = (PLpgSQL_var *) target;
+ Datum newvalue;
- newvalue = exec_cast_value(value, valtype, var->datatype->typoid,
- &(var->datatype->typinput),
- var->datatype->typioparam,
- var->datatype->atttypmod,
- isNull);
+ newvalue = exec_cast_value(value, valtype, var->datatype->typoid,
+ &(var->datatype->typinput),
+ var->datatype->typioparam,
+ var->datatype->atttypmod,
+ isNull);
- if (*isNull && var->notnull)
- ereport(ERROR,
- (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
- errmsg("NULL cannot be assigned to variable \"%s\" declared NOT NULL",
- var->refname)));
+ if (*isNull && var->notnull)
+ ereport(ERROR,
+ (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
+ errmsg("NULL cannot be assigned to variable \"%s\" declared NOT NULL",
+ var->refname)));
- if (var->freeval)
- {
- pfree(DatumGetPointer(var->value));
- var->freeval = false;
- }
+ if (var->freeval)
+ {
+ pfree(DatumGetPointer(var->value));
+ var->freeval = false;
+ }
- /*
- * If type is by-reference, make sure we have a freshly
- * palloc'd copy; the originally passed value may not live as
- * long as the variable! But we don't need to re-copy if
- * exec_cast_value performed a conversion; its output must
- * already be palloc'd.
- */
- if (!var->datatype->typbyval && !*isNull)
- {
- if (newvalue == value)
- var->value = datumCopy(newvalue,
- false,
- var->datatype->typlen);
+ /*
+ * If type is by-reference, make sure we have a freshly
+ * palloc'd copy; the originally passed value may not live
+ * as long as the variable! But we don't need to re-copy
+ * if exec_cast_value performed a conversion; its output
+ * must already be palloc'd.
+ */
+ if (!var->datatype->typbyval && !*isNull)
+ {
+ if (newvalue == value)
+ var->value = datumCopy(newvalue,
+ false,
+ var->datatype->typlen);
+ else
+ var->value = newvalue;
+ var->freeval = true;
+ }
else
var->value = newvalue;
- var->freeval = true;
+ var->isnull = *isNull;
+ break;
}
- else
- var->value = newvalue;
- var->isnull = *isNull;
- break;
- }
case PLPGSQL_DTYPE_ROW:
- {
- /*
- * Target is a row variable
- */
- PLpgSQL_row *row = (PLpgSQL_row *) target;
-
- /* Source must be of RECORD or composite type */
- if (!(valtype == RECORDOID ||
- get_typtype(valtype) == 'c'))
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot assign non-composite value to a row variable")));
- if (*isNull)
- {
- /* If source is null, just assign nulls to the row */
- exec_move_row(estate, NULL, row, NULL, NULL);
- }
- else
{
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup;
-
- /* Else source is a tuple Datum, safe to do this: */
- td = DatumGetHeapTupleHeader(value);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- ItemPointerSetInvalid(&(tmptup.t_self));
- tmptup.t_tableOid = InvalidOid;
- tmptup.t_data = td;
- exec_move_row(estate, NULL, row, &tmptup, tupdesc);
+ /*
+ * Target is a row variable
+ */
+ PLpgSQL_row *row = (PLpgSQL_row *) target;
+
+ /* Source must be of RECORD or composite type */
+ if (!(valtype == RECORDOID ||
+ get_typtype(valtype) == 'c'))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("cannot assign non-composite value to a row variable")));
+ if (*isNull)
+ {
+ /* If source is null, just assign nulls to the row */
+ exec_move_row(estate, NULL, row, NULL, NULL);
+ }
+ else
+ {
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup;
+
+ /* Else source is a tuple Datum, safe to do this: */
+ td = DatumGetHeapTupleHeader(value);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ ItemPointerSetInvalid(&(tmptup.t_self));
+ tmptup.t_tableOid = InvalidOid;
+ tmptup.t_data = td;
+ exec_move_row(estate, NULL, row, &tmptup, tupdesc);
+ }
+ break;
}
- break;
- }
case PLPGSQL_DTYPE_REC:
- {
- /*
- * Target is a record variable
- */
- PLpgSQL_rec *rec = (PLpgSQL_rec *) target;
-
- /* Source must be of RECORD or composite type */
- if (!(valtype == RECORDOID ||
- get_typtype(valtype) == 'c'))
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("cannot assign non-composite value to a record variable")));
- if (*isNull)
{
- /* If source is null, just assign nulls to the record */
- exec_move_row(estate, rec, NULL, NULL, NULL);
- }
- else
- {
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup;
-
- /* Else source is a tuple Datum, safe to do this: */
- td = DatumGetHeapTupleHeader(value);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- ItemPointerSetInvalid(&(tmptup.t_self));
- tmptup.t_tableOid = InvalidOid;
- tmptup.t_data = td;
- exec_move_row(estate, rec, NULL, &tmptup, tupdesc);
+ /*
+ * Target is a record variable
+ */
+ PLpgSQL_rec *rec = (PLpgSQL_rec *) target;
+
+ /* Source must be of RECORD or composite type */
+ if (!(valtype == RECORDOID ||
+ get_typtype(valtype) == 'c'))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("cannot assign non-composite value to a record variable")));
+ if (*isNull)
+ {
+ /* If source is null, just assign nulls to the record */
+ exec_move_row(estate, rec, NULL, NULL, NULL);
+ }
+ else
+ {
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup;
+
+ /* Else source is a tuple Datum, safe to do this: */
+ td = DatumGetHeapTupleHeader(value);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ ItemPointerSetInvalid(&(tmptup.t_self));
+ tmptup.t_tableOid = InvalidOid;
+ tmptup.t_data = td;
+ exec_move_row(estate, rec, NULL, &tmptup, tupdesc);
+ }
+ break;
}
- break;
- }
case PLPGSQL_DTYPE_RECFIELD:
- {
- /*
- * Target is a field of a record
- */
- PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) target;
- PLpgSQL_rec *rec;
- int fno;
- HeapTuple newtup;
- int natts;
- int i;
- Datum *values;
- char *nulls;
- void *mustfree;
- bool attisnull;
- Oid atttype;
- int32 atttypmod;
-
- rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
+ {
+ /*
+ * Target is a field of a record
+ */
+ PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) target;
+ PLpgSQL_rec *rec;
+ int fno;
+ HeapTuple newtup;
+ int natts;
+ int i;
+ Datum *values;
+ char *nulls;
+ void *mustfree;
+ bool attisnull;
+ Oid atttype;
+ int32 atttypmod;
+
+ rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
- /*
- * Check that there is already a tuple in the record. We need
- * that because records don't have any predefined field
- * structure.
- */
- if (!HeapTupleIsValid(rec->tup))
- ereport(ERROR,
+ /*
+ * Check that there is already a tuple in the record. We
+ * need that because records don't have any predefined
+ * field structure.
+ */
+ if (!HeapTupleIsValid(rec->tup))
+ ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("record \"%s\" is not assigned yet",
rec->refname),
errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
- /*
- * Get the number of the records field to change and the
- * number of attributes in the tuple.
- */
- fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
- if (fno == SPI_ERROR_NOATTRIBUTE)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("record \"%s\" has no field \"%s\"",
- rec->refname, recfield->fieldname)));
- fno--;
- natts = rec->tupdesc->natts;
+ /*
+ * Get the number of the records field to change and the
+ * number of attributes in the tuple.
+ */
+ fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
+ if (fno == SPI_ERROR_NOATTRIBUTE)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("record \"%s\" has no field \"%s\"",
+ rec->refname, recfield->fieldname)));
+ fno--;
+ natts = rec->tupdesc->natts;
- /*
- * Set up values/datums arrays for heap_formtuple. For all
- * the attributes except the one we want to replace, use the
- * value that's in the old tuple.
- */
- values = palloc(sizeof(Datum) * natts);
- nulls = palloc(natts);
+ /*
+ * Set up values/datums arrays for heap_formtuple. For
+ * all the attributes except the one we want to replace,
+ * use the value that's in the old tuple.
+ */
+ values = palloc(sizeof(Datum) * natts);
+ nulls = palloc(natts);
- for (i = 0; i < natts; i++)
- {
- if (i == fno)
- continue;
- values[i] = SPI_getbinval(rec->tup, rec->tupdesc,
- i + 1, &attisnull);
+ for (i = 0; i < natts; i++)
+ {
+ if (i == fno)
+ continue;
+ values[i] = SPI_getbinval(rec->tup, rec->tupdesc,
+ i + 1, &attisnull);
+ if (attisnull)
+ nulls[i] = 'n';
+ else
+ nulls[i] = ' ';
+ }
+
+ /*
+ * Now insert the new value, being careful to cast it to
+ * the right type.
+ */
+ atttype = SPI_gettypeid(rec->tupdesc, fno + 1);
+ atttypmod = rec->tupdesc->attrs[fno]->atttypmod;
+ attisnull = *isNull;
+ values[fno] = exec_simple_cast_value(value,
+ valtype,
+ atttype,
+ atttypmod,
+ &attisnull);
if (attisnull)
- nulls[i] = 'n';
+ nulls[fno] = 'n';
else
- nulls[i] = ' ';
- }
+ nulls[fno] = ' ';
- /*
- * Now insert the new value, being careful to cast it to the
- * right type.
- */
- atttype = SPI_gettypeid(rec->tupdesc, fno + 1);
- atttypmod = rec->tupdesc->attrs[fno]->atttypmod;
- attisnull = *isNull;
- values[fno] = exec_simple_cast_value(value,
- valtype,
- atttype,
- atttypmod,
- &attisnull);
- if (attisnull)
- nulls[fno] = 'n';
- else
- nulls[fno] = ' ';
-
- /*
- * Avoid leaking the result of exec_simple_cast_value, if it
- * performed a conversion to a pass-by-ref type.
- */
- if (!attisnull && values[fno] != value && !get_typbyval(atttype))
- mustfree = DatumGetPointer(values[fno]);
- else
- mustfree = NULL;
+ /*
+ * Avoid leaking the result of exec_simple_cast_value, if
+ * it performed a conversion to a pass-by-ref type.
+ */
+ if (!attisnull && values[fno] != value && !get_typbyval(atttype))
+ mustfree = DatumGetPointer(values[fno]);
+ else
+ mustfree = NULL;
- /*
- * Now call heap_formtuple() to create a new tuple that
- * replaces the old one in the record.
- */
- newtup = heap_formtuple(rec->tupdesc, values, nulls);
+ /*
+ * Now call heap_formtuple() to create a new tuple that
+ * replaces the old one in the record.
+ */
+ newtup = heap_formtuple(rec->tupdesc, values, nulls);
- if (rec->freetup)
- heap_freetuple(rec->tup);
+ if (rec->freetup)
+ heap_freetuple(rec->tup);
- rec->tup = newtup;
- rec->freetup = true;
+ rec->tup = newtup;
+ rec->freetup = true;
- pfree(values);
- pfree(nulls);
- if (mustfree)
- pfree(mustfree);
+ pfree(values);
+ pfree(nulls);
+ if (mustfree)
+ pfree(mustfree);
- break;
- }
+ break;
+ }
case PLPGSQL_DTYPE_ARRAYELEM:
- {
- int nsubscripts;
- int i;
- PLpgSQL_expr *subscripts[MAXDIM];
- int subscriptvals[MAXDIM];
- bool havenullsubscript,
- oldarrayisnull;
- Oid arraytypeid,
- arrayelemtypeid;
- int16 elemtyplen;
- bool elemtypbyval;
- char elemtypalign;
- Datum oldarrayval,
- coerced_value;
- ArrayType *newarrayval;
-
- /*
- * Target is an element of an array
- *
- * To handle constructs like x[1][2] := something, we have to be
- * prepared to deal with a chain of arrayelem datums. Chase
- * back to find the base array datum, and save the subscript
- * expressions as we go. (We are scanning right to left here,
- * but want to evaluate the subscripts left-to-right to
- * minimize surprises.)
- */
- nsubscripts = 0;
- do
{
- PLpgSQL_arrayelem *arrayelem = (PLpgSQL_arrayelem *) target;
+ int nsubscripts;
+ int i;
+ PLpgSQL_expr *subscripts[MAXDIM];
+ int subscriptvals[MAXDIM];
+ bool havenullsubscript,
+ oldarrayisnull;
+ Oid arraytypeid,
+ arrayelemtypeid;
+ int16 elemtyplen;
+ bool elemtypbyval;
+ char elemtypalign;
+ Datum oldarrayval,
+ coerced_value;
+ ArrayType *newarrayval;
- if (nsubscripts >= MAXDIM)
- ereport(ERROR,
- (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
- errmsg("number of array dimensions exceeds the maximum allowed, %d",
- MAXDIM)));
- subscripts[nsubscripts++] = arrayelem->subscript;
- target = estate->datums[arrayelem->arrayparentno];
- } while (target->dtype == PLPGSQL_DTYPE_ARRAYELEM);
-
- /* Fetch current value of array datum */
- exec_eval_datum(estate, target, InvalidOid,
+ /*
+ * Target is an element of an array
+ *
+ * To handle constructs like x[1][2] := something, we have to
+ * be prepared to deal with a chain of arrayelem datums.
+ * Chase back to find the base array datum, and save the
+ * subscript expressions as we go. (We are scanning right
+ * to left here, but want to evaluate the subscripts
+ * left-to-right to minimize surprises.)
+ */
+ nsubscripts = 0;
+ do
+ {
+ PLpgSQL_arrayelem *arrayelem = (PLpgSQL_arrayelem *) target;
+
+ if (nsubscripts >= MAXDIM)
+ ereport(ERROR,
+ (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("number of array dimensions exceeds the maximum allowed, %d",
+ MAXDIM)));
+ subscripts[nsubscripts++] = arrayelem->subscript;
+ target = estate->datums[arrayelem->arrayparentno];
+ } while (target->dtype == PLPGSQL_DTYPE_ARRAYELEM);
+
+ /* Fetch current value of array datum */
+ exec_eval_datum(estate, target, InvalidOid,
&arraytypeid, &oldarrayval, &oldarrayisnull);
- arrayelemtypeid = get_element_type(arraytypeid);
- if (!OidIsValid(arrayelemtypeid))
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("subscripted object is not an array")));
+ arrayelemtypeid = get_element_type(arraytypeid);
+ if (!OidIsValid(arrayelemtypeid))
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("subscripted object is not an array")));
- /* Evaluate the subscripts, switch into left-to-right order */
- havenullsubscript = false;
- for (i = 0; i < nsubscripts; i++)
- {
- bool subisnull;
+ /*
+ * Evaluate the subscripts, switch into left-to-right
+ * order
+ */
+ havenullsubscript = false;
+ for (i = 0; i < nsubscripts; i++)
+ {
+ bool subisnull;
- subscriptvals[i] =
- exec_eval_integer(estate,
- subscripts[nsubscripts - 1 - i],
- &subisnull);
- havenullsubscript |= subisnull;
- }
+ subscriptvals[i] =
+ exec_eval_integer(estate,
+ subscripts[nsubscripts - 1 - i],
+ &subisnull);
+ havenullsubscript |= subisnull;
+ }
- /*
- * Skip the assignment if we have any nulls, either in the
- * original array value, the subscripts, or the righthand
- * side. This is pretty bogus but it corresponds to the
- * current behavior of ExecEvalArrayRef().
- */
- if (oldarrayisnull || havenullsubscript || *isNull)
- return;
+ /*
+ * Skip the assignment if we have any nulls, either in the
+ * original array value, the subscripts, or the righthand
+ * side. This is pretty bogus but it corresponds to the
+ * current behavior of ExecEvalArrayRef().
+ */
+ if (oldarrayisnull || havenullsubscript || *isNull)
+ return;
- /* Coerce source value to match array element type. */
- coerced_value = exec_simple_cast_value(value,
- valtype,
- arrayelemtypeid,
- -1,
- isNull);
+ /* Coerce source value to match array element type. */
+ coerced_value = exec_simple_cast_value(value,
+ valtype,
+ arrayelemtypeid,
+ -1,
+ isNull);
- /*
- * Build the modified array value.
- */
- get_typlenbyvalalign(arrayelemtypeid,
- &elemtyplen,
- &elemtypbyval,
- &elemtypalign);
-
- newarrayval = array_set((ArrayType *) DatumGetPointer(oldarrayval),
- nsubscripts,
- subscriptvals,
- coerced_value,
- get_typlen(arraytypeid),
- elemtyplen,
- elemtypbyval,
- elemtypalign,
- isNull);
+ /*
+ * Build the modified array value.
+ */
+ get_typlenbyvalalign(arrayelemtypeid,
+ &elemtyplen,
+ &elemtypbyval,
+ &elemtypalign);
+
+ newarrayval = array_set((ArrayType *) DatumGetPointer(oldarrayval),
+ nsubscripts,
+ subscriptvals,
+ coerced_value,
+ get_typlen(arraytypeid),
+ elemtyplen,
+ elemtypbyval,
+ elemtypalign,
+ isNull);
- /*
- * Assign it to the base variable.
- */
- exec_assign_value(estate, target,
- PointerGetDatum(newarrayval),
- arraytypeid, isNull);
+ /*
+ * Assign it to the base variable.
+ */
+ exec_assign_value(estate, target,
+ PointerGetDatum(newarrayval),
+ arraytypeid, isNull);
- /*
- * Avoid leaking the result of exec_simple_cast_value, if it
- * performed a conversion to a pass-by-ref type.
- */
- if (!*isNull && coerced_value != value && !elemtypbyval)
- pfree(DatumGetPointer(coerced_value));
+ /*
+ * Avoid leaking the result of exec_simple_cast_value, if
+ * it performed a conversion to a pass-by-ref type.
+ */
+ if (!*isNull && coerced_value != value && !elemtypbyval)
+ pfree(DatumGetPointer(coerced_value));
- /*
- * Avoid leaking the modified array value, too.
- */
- pfree(newarrayval);
- break;
- }
+ /*
+ * Avoid leaking the modified array value, too.
+ */
+ pfree(newarrayval);
+ break;
+ }
default:
elog(ERROR, "unrecognized dtype: %d", target->dtype);
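The ARRAYELEM case above chases back through a chain of array-element datums, saving subscripts right to left and then evaluating them left to right via subscripts[nsubscripts - 1 - i]. A minimal standalone sketch of that reversal pattern, using a hypothetical node type rather than the PL/pgSQL structures:

#include <stdio.h>

#define MAXDIM 6

/*
 * Hypothetical stand-in for a chain of arrayelem datums: each node carries
 * one subscript and points back toward the base array.
 */
typedef struct Elem
{
	int			subscript;
	struct Elem *parent;		/* NULL stands in for the base array datum */
} Elem;

int
main(void)
{
	/* Represents x[1][2][3]; the assignment target is the innermost node. */
	Elem		leftmost = {1, NULL};
	Elem		middle = {2, &leftmost};
	Elem		target = {3, &middle};

	int			collected[MAXDIM];
	int			n = 0;

	/* Chase back, saving subscripts in right-to-left order. */
	for (Elem *e = &target; e != NULL; e = e->parent)
		collected[n++] = e->subscript;

	/*
	 * "Evaluate" (here: print) left to right by indexing from the end,
	 * mirroring subscripts[nsubscripts - 1 - i] in the code above.
	 */
	for (int i = 0; i < n; i++)
		printf("subscript %d = %d\n", i, collected[n - 1 - i]);

	return 0;
}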
switch (datum->dtype)
{
case PLPGSQL_DTYPE_VAR:
- {
- PLpgSQL_var *var = (PLpgSQL_var *) datum;
+ {
+ PLpgSQL_var *var = (PLpgSQL_var *) datum;
- *typeid = var->datatype->typoid;
- *value = var->value;
- *isnull = var->isnull;
- if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type of \"%s\" does not match that when preparing the plan",
- var->refname)));
- break;
- }
+ *typeid = var->datatype->typoid;
+ *value = var->value;
+ *isnull = var->isnull;
+ if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type of \"%s\" does not match that when preparing the plan",
+ var->refname)));
+ break;
+ }
case PLPGSQL_DTYPE_ROW:
- {
- PLpgSQL_row *row = (PLpgSQL_row *) datum;
- HeapTuple tup;
-
- if (!row->rowtupdesc) /* should not happen */
- elog(ERROR, "row variable has no tupdesc");
- /* Make sure we have a valid type/typmod setting */
- BlessTupleDesc(row->rowtupdesc);
- tup = make_tuple_from_row(estate, row, row->rowtupdesc);
- if (tup == NULL) /* should not happen */
- elog(ERROR, "row not compatible with its own tupdesc");
- *typeid = row->rowtupdesc->tdtypeid;
- *value = HeapTupleGetDatum(tup);
- *isnull = false;
- if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type of \"%s\" does not match that when preparing the plan",
- row->refname)));
- break;
- }
+ {
+ PLpgSQL_row *row = (PLpgSQL_row *) datum;
+ HeapTuple tup;
+
+ if (!row->rowtupdesc) /* should not happen */
+ elog(ERROR, "row variable has no tupdesc");
+ /* Make sure we have a valid type/typmod setting */
+ BlessTupleDesc(row->rowtupdesc);
+ tup = make_tuple_from_row(estate, row, row->rowtupdesc);
+ if (tup == NULL) /* should not happen */
+ elog(ERROR, "row not compatible with its own tupdesc");
+ *typeid = row->rowtupdesc->tdtypeid;
+ *value = HeapTupleGetDatum(tup);
+ *isnull = false;
+ if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type of \"%s\" does not match that when preparing the plan",
+ row->refname)));
+ break;
+ }
case PLPGSQL_DTYPE_REC:
- {
- PLpgSQL_rec *rec = (PLpgSQL_rec *) datum;
- HeapTupleData worktup;
+ {
+ PLpgSQL_rec *rec = (PLpgSQL_rec *) datum;
+ HeapTupleData worktup;
- if (!HeapTupleIsValid(rec->tup))
- ereport(ERROR,
- (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("record \"%s\" is not assigned yet",
- rec->refname),
- errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
- Assert(rec->tupdesc != NULL);
- /* Make sure we have a valid type/typmod setting */
- BlessTupleDesc(rec->tupdesc);
- /*
- * In a trigger, the NEW and OLD parameters are likely to be
- * on-disk tuples that don't have the desired Datum fields.
- * Copy the tuple body and insert the right values.
- */
- heap_copytuple_with_tuple(rec->tup, &worktup);
- HeapTupleHeaderSetDatumLength(worktup.t_data, worktup.t_len);
- HeapTupleHeaderSetTypeId(worktup.t_data, rec->tupdesc->tdtypeid);
- HeapTupleHeaderSetTypMod(worktup.t_data, rec->tupdesc->tdtypmod);
- *typeid = rec->tupdesc->tdtypeid;
- *value = HeapTupleGetDatum(&worktup);
- *isnull = false;
- if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type of \"%s\" does not match that when preparing the plan",
- rec->refname)));
- break;
- }
+ if (!HeapTupleIsValid(rec->tup))
+ ereport(ERROR,
+ (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+ errmsg("record \"%s\" is not assigned yet",
+ rec->refname),
+ errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
+ Assert(rec->tupdesc != NULL);
+ /* Make sure we have a valid type/typmod setting */
+ BlessTupleDesc(rec->tupdesc);
+
+ /*
+ * In a trigger, the NEW and OLD parameters are likely to
+ * be on-disk tuples that don't have the desired Datum
+ * fields. Copy the tuple body and insert the right
+ * values.
+ */
+ heap_copytuple_with_tuple(rec->tup, &worktup);
+ HeapTupleHeaderSetDatumLength(worktup.t_data, worktup.t_len);
+ HeapTupleHeaderSetTypeId(worktup.t_data, rec->tupdesc->tdtypeid);
+ HeapTupleHeaderSetTypMod(worktup.t_data, rec->tupdesc->tdtypmod);
+ *typeid = rec->tupdesc->tdtypeid;
+ *value = HeapTupleGetDatum(&worktup);
+ *isnull = false;
+ if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type of \"%s\" does not match that when preparing the plan",
+ rec->refname)));
+ break;
+ }
case PLPGSQL_DTYPE_RECFIELD:
- {
- PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum;
- PLpgSQL_rec *rec;
- int fno;
+ {
+ PLpgSQL_recfield *recfield = (PLpgSQL_recfield *) datum;
+ PLpgSQL_rec *rec;
+ int fno;
- rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
- if (!HeapTupleIsValid(rec->tup))
- ereport(ERROR,
+ rec = (PLpgSQL_rec *) (estate->datums[recfield->recparentno]);
+ if (!HeapTupleIsValid(rec->tup))
+ ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("record \"%s\" is not assigned yet",
rec->refname),
errdetail("The tuple structure of a not-yet-assigned record is indeterminate.")));
- fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
- if (fno == SPI_ERROR_NOATTRIBUTE)
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_COLUMN),
- errmsg("record \"%s\" has no field \"%s\"",
- rec->refname, recfield->fieldname)));
- *typeid = SPI_gettypeid(rec->tupdesc, fno);
- *value = SPI_getbinval(rec->tup, rec->tupdesc, fno, isnull);
- if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type of \"%s.%s\" does not match that when preparing the plan",
- rec->refname, recfield->fieldname)));
- break;
- }
+ fno = SPI_fnumber(rec->tupdesc, recfield->fieldname);
+ if (fno == SPI_ERROR_NOATTRIBUTE)
+ ereport(ERROR,
+ (errcode(ERRCODE_UNDEFINED_COLUMN),
+ errmsg("record \"%s\" has no field \"%s\"",
+ rec->refname, recfield->fieldname)));
+ *typeid = SPI_gettypeid(rec->tupdesc, fno);
+ *value = SPI_getbinval(rec->tup, rec->tupdesc, fno, isnull);
+ if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type of \"%s.%s\" does not match that when preparing the plan",
+ rec->refname, recfield->fieldname)));
+ break;
+ }
case PLPGSQL_DTYPE_TRIGARG:
- {
- PLpgSQL_trigarg *trigarg = (PLpgSQL_trigarg *) datum;
- int tgargno;
-
- *typeid = TEXTOID;
- tgargno = exec_eval_integer(estate, trigarg->argnum, isnull);
- if (*isnull || tgargno < 0 || tgargno >= estate->trig_nargs)
{
- *value = (Datum) 0;
- *isnull = true;
- }
- else
- {
- *value = estate->trig_argv[tgargno];
- *isnull = false;
+ PLpgSQL_trigarg *trigarg = (PLpgSQL_trigarg *) datum;
+ int tgargno;
+
+ *typeid = TEXTOID;
+ tgargno = exec_eval_integer(estate, trigarg->argnum, isnull);
+ if (*isnull || tgargno < 0 || tgargno >= estate->trig_nargs)
+ {
+ *value = (Datum) 0;
+ *isnull = true;
+ }
+ else
+ {
+ *value = estate->trig_argv[tgargno];
+ *isnull = false;
+ }
+ if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
+ ereport(ERROR,
+ (errcode(ERRCODE_DATATYPE_MISMATCH),
+ errmsg("type of tgargv[%d] does not match that when preparing the plan",
+ tgargno)));
+ break;
}
- if (expectedtypeid != InvalidOid && expectedtypeid != *typeid)
- ereport(ERROR,
- (errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("type of tgargv[%d] does not match that when preparing the plan",
- tgargno)));
- break;
- }
default:
elog(ERROR, "unrecognized dtype: %d", datum->dtype);
/*
* Create an EState for evaluation of simple expressions, if there's
- * not one already in the current transaction. The EState is made a
+ * not one already in the current transaction. The EState is made a
* child of TopTransactionContext so it will have the right lifespan.
*/
if (simple_eval_estate == NULL)
}
/*
- * Create an expression context for simple expressions, if there's
- * not one already in the current function call. This must be a
- * child of simple_eval_estate.
+ * Create an expression context for simple expressions, if there's not
+ * one already in the current function call. This must be a child of
+ * simple_eval_estate.
*/
econtext = estate->eval_econtext;
if (econtext == NULL)
/*
* Param list can live in econtext's temporary memory context.
*
- * XXX think about avoiding repeated palloc's for param lists?
- * Beware however that this routine is re-entrant: exec_eval_datum()
- * can call it back for subscript evaluation, and so there can be a
- * need to have more than one active param list.
+ * XXX think about avoiding repeated palloc's for param lists? Beware
+ * however that this routine is re-entrant: exec_eval_datum() can call
+ * it back for subscript evaluation, and so there can be a need to
+ * have more than one active param list.
*/
paramLI = (ParamListInfo)
MemoryContextAllocZero(econtext->ecxt_per_tuple_memory,
* expected if it's from an inheritance-child table of the current
* table, or it might have fewer if the table has had columns added by
* ALTER TABLE. Ignore extra columns and assume NULL for missing
- * columns, the same as heap_getattr would do. We also have to skip
+ * columns, the same as heap_getattr would do. We also have to skip
* over dropped columns in either the source or destination.
*
* If we have no tuple data at all, we'll assign NULL to all columns of
PLpgSQL_var *var;
if (tupdesc->attrs[i]->attisdropped)
- continue; /* leave the column as null */
+ continue; /* leave the column as null */
if (row->varnos[i] < 0) /* should not happen */
elog(ERROR, "dropped rowtype entry for non-dropped column");
case T_FieldStore:
{
- FieldStore *expr = (FieldStore *) node;
+ FieldStore *expr = (FieldStore *) node;
if (!exec_simple_check_node((Node *) expr->arg))
return FALSE;
case T_RowExpr:
{
- RowExpr *expr = (RowExpr *) node;
+ RowExpr *expr = (RowExpr *) node;
if (!exec_simple_check_node((Node *) expr->args))
return FALSE;
/*
* Nothing to do at subtransaction events
*
- * XXX really? Maybe subtransactions need to have their own
- * simple_eval_estate? It would get a lot messier, so for now
+ * XXX really? Maybe subtransactions need to have their own
+ * simple_eval_estate? It would get a lot messier, so for now
* let's assume we don't need that.
*/
case XACT_EVENT_START_SUB:
expr->expr_simple_next = NULL;
}
active_simple_exprs = NULL;
+
/*
- * If we are doing a clean transaction shutdown, free the EState
- * (so that any remaining resources will be released correctly).
- * In an abort, we expect the regular abort recovery procedures to
- * release everything of interest.
+ * If we are doing a clean transaction shutdown, free the
+ * EState (so that any remaining resources will be released
+ * correctly). In an abort, we expect the regular abort
+ * recovery procedures to release everything of interest.
*/
if (event == XACT_EVENT_COMMIT && simple_eval_estate)
FreeExecutorState(simple_eval_estate);
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.34 2004/07/31 23:04:56 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/pl_funcs.c,v 1.35 2004/08/29 05:07:01 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
{
/* Quoted identifier: copy, collapsing out doubled quotes */
- curident = palloc(strlen(s) + 1); /* surely enough room */
+ curident = palloc(strlen(s) + 1); /* surely enough room */
cp = curident;
s++;
while (*s)
s++;
*cp = '\0';
/* Truncate to NAMEDATALEN */
- truncate_identifier(curident, cp-curident, false);
+ truncate_identifier(curident, cp - curident, false);
}
else
{
while (*s && *s != '.' && !isspace((unsigned char) *s))
s++;
/* Downcase and truncate to NAMEDATALEN */
- curident = downcase_truncate_identifier(thisstart, s-thisstart,
+ curident = downcase_truncate_identifier(thisstart, s - thisstart,
false);
}
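The quoted-identifier branch above copies the identifier body while collapsing doubled quotes before truncating it to NAMEDATALEN. A standalone sketch of just that collapsing step, with a made-up helper name (not the pl_funcs.c code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Copy the body of a double-quoted identifier, turning each embedded ""
 * pair into a single ".  's' points at the opening quote; the result is
 * newly allocated and must be freed by the caller.
 */
static char *
copy_quoted_ident(const char *s)
{
	char	   *result = malloc(strlen(s) + 1); /* surely enough room */
	char	   *cp = result;

	s++;						/* skip the opening quote */
	while (*s)
	{
		if (*s == '"')
		{
			if (s[1] == '"')
			{
				*cp++ = '"';	/* doubled quote -> one literal quote */
				s += 2;
			}
			else
				break;			/* closing quote */
		}
		else
			*cp++ = *s++;
	}
	*cp = '\0';
	return result;
}

int
main(void)
{
	char	   *ident = copy_quoted_ident("\"my\"\"weird\"\"name\"");

	printf("%s\n", ident);		/* prints: my"weird"name */
	free(ident);
	return 0;
}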
*
* Copyright (c) 2003, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plerrcodes.h,v 1.3 2004/08/02 17:03:48 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plerrcodes.h,v 1.4 2004/08/29 05:07:01 momjian Exp $
*
*-------------------------------------------------------------------------
*/
/* Success and warnings can't be caught, so omit them from table */
-{ "sql_statement_not_yet_complete", ERRCODE_SQL_STATEMENT_NOT_YET_COMPLETE },
-{ "connection_exception", ERRCODE_CONNECTION_EXCEPTION },
-{ "connection_does_not_exist", ERRCODE_CONNECTION_DOES_NOT_EXIST },
-{ "connection_failure", ERRCODE_CONNECTION_FAILURE },
-{ "sqlclient_unable_to_establish_sqlconnection", ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION },
-{ "sqlserver_rejected_establishment_of_sqlconnection", ERRCODE_SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION },
-{ "transaction_resolution_unknown", ERRCODE_TRANSACTION_RESOLUTION_UNKNOWN },
-{ "protocol_violation", ERRCODE_PROTOCOL_VIOLATION },
-{ "triggered_action_exception", ERRCODE_TRIGGERED_ACTION_EXCEPTION },
-{ "feature_not_supported", ERRCODE_FEATURE_NOT_SUPPORTED },
-{ "invalid_transaction_initiation", ERRCODE_INVALID_TRANSACTION_INITIATION },
-{ "locator_exception", ERRCODE_LOCATOR_EXCEPTION },
-{ "invalid_locator_specification", ERRCODE_L_E_INVALID_SPECIFICATION },
-{ "invalid_grantor", ERRCODE_INVALID_GRANTOR },
-{ "invalid_grant_operation", ERRCODE_INVALID_GRANT_OPERATION },
-{ "invalid_role_specification", ERRCODE_INVALID_ROLE_SPECIFICATION },
-{ "cardinality_violation", ERRCODE_CARDINALITY_VIOLATION },
-{ "data_exception", ERRCODE_DATA_EXCEPTION },
-{ "array_element_error", ERRCODE_ARRAY_ELEMENT_ERROR },
-{ "array_subscript_error", ERRCODE_ARRAY_SUBSCRIPT_ERROR },
-{ "character_not_in_repertoire", ERRCODE_CHARACTER_NOT_IN_REPERTOIRE },
-{ "datetime_field_overflow", ERRCODE_DATETIME_FIELD_OVERFLOW },
-{ "datetime_value_out_of_range", ERRCODE_DATETIME_VALUE_OUT_OF_RANGE },
-{ "division_by_zero", ERRCODE_DIVISION_BY_ZERO },
-{ "error_in_assignment", ERRCODE_ERROR_IN_ASSIGNMENT },
-{ "escape_character_conflict", ERRCODE_ESCAPE_CHARACTER_CONFLICT },
-{ "indicator_overflow", ERRCODE_INDICATOR_OVERFLOW },
-{ "interval_field_overflow", ERRCODE_INTERVAL_FIELD_OVERFLOW },
-{ "invalid_argument_for_logarithm", ERRCODE_INVALID_ARGUMENT_FOR_LOG },
-{ "invalid_argument_for_power_function", ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION },
-{ "invalid_argument_for_width_bucket_function", ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION },
-{ "invalid_character_value_for_cast", ERRCODE_INVALID_CHARACTER_VALUE_FOR_CAST },
-{ "invalid_datetime_format", ERRCODE_INVALID_DATETIME_FORMAT },
-{ "invalid_escape_character", ERRCODE_INVALID_ESCAPE_CHARACTER },
-{ "invalid_escape_octet", ERRCODE_INVALID_ESCAPE_OCTET },
-{ "invalid_escape_sequence", ERRCODE_INVALID_ESCAPE_SEQUENCE },
-{ "invalid_indicator_parameter_value", ERRCODE_INVALID_INDICATOR_PARAMETER_VALUE },
-{ "invalid_limit_value", ERRCODE_INVALID_LIMIT_VALUE },
-{ "invalid_parameter_value", ERRCODE_INVALID_PARAMETER_VALUE },
-{ "invalid_regular_expression", ERRCODE_INVALID_REGULAR_EXPRESSION },
-{ "invalid_time_zone_displacement_value", ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE },
-{ "invalid_use_of_escape_character", ERRCODE_INVALID_USE_OF_ESCAPE_CHARACTER },
-{ "most_specific_type_mismatch", ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH },
-{ "null_value_not_allowed", ERRCODE_NULL_VALUE_NOT_ALLOWED },
-{ "null_value_no_indicator_parameter", ERRCODE_NULL_VALUE_NO_INDICATOR_PARAMETER },
-{ "numeric_value_out_of_range", ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE },
-{ "string_data_length_mismatch", ERRCODE_STRING_DATA_LENGTH_MISMATCH },
-{ "string_data_right_truncation", ERRCODE_STRING_DATA_RIGHT_TRUNCATION },
-{ "substring_error", ERRCODE_SUBSTRING_ERROR },
-{ "trim_error", ERRCODE_TRIM_ERROR },
-{ "unterminated_c_string", ERRCODE_UNTERMINATED_C_STRING },
-{ "zero_length_character_string", ERRCODE_ZERO_LENGTH_CHARACTER_STRING },
-{ "floating_point_exception", ERRCODE_FLOATING_POINT_EXCEPTION },
-{ "invalid_text_representation", ERRCODE_INVALID_TEXT_REPRESENTATION },
-{ "invalid_binary_representation", ERRCODE_INVALID_BINARY_REPRESENTATION },
-{ "bad_copy_file_format", ERRCODE_BAD_COPY_FILE_FORMAT },
-{ "untranslatable_character", ERRCODE_UNTRANSLATABLE_CHARACTER },
-{ "integrity_constraint_violation", ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION },
-{ "restrict_violation", ERRCODE_RESTRICT_VIOLATION },
-{ "not_null_violation", ERRCODE_NOT_NULL_VIOLATION },
-{ "foreign_key_violation", ERRCODE_FOREIGN_KEY_VIOLATION },
-{ "unique_violation", ERRCODE_UNIQUE_VIOLATION },
-{ "check_violation", ERRCODE_CHECK_VIOLATION },
-{ "invalid_cursor_state", ERRCODE_INVALID_CURSOR_STATE },
-{ "invalid_transaction_state", ERRCODE_INVALID_TRANSACTION_STATE },
-{ "active_sql_transaction", ERRCODE_ACTIVE_SQL_TRANSACTION },
-{ "branch_transaction_already_active", ERRCODE_BRANCH_TRANSACTION_ALREADY_ACTIVE },
-{ "held_cursor_requires_same_isolation_level", ERRCODE_HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL },
-{ "inappropriate_access_mode_for_branch_transaction", ERRCODE_INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION },
-{ "inappropriate_isolation_level_for_branch_transaction", ERRCODE_INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION },
-{ "no_active_sql_transaction_for_branch_transaction", ERRCODE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION },
-{ "read_only_sql_transaction", ERRCODE_READ_ONLY_SQL_TRANSACTION },
-{ "schema_and_data_statement_mixing_not_supported", ERRCODE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED },
-{ "no_active_sql_transaction", ERRCODE_NO_ACTIVE_SQL_TRANSACTION },
-{ "in_failed_sql_transaction", ERRCODE_IN_FAILED_SQL_TRANSACTION },
-{ "invalid_sql_statement_name", ERRCODE_INVALID_SQL_STATEMENT_NAME },
-{ "triggered_data_change_violation", ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION },
-{ "invalid_authorization_specification", ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION },
-{ "dependent_privilege_descriptors_still_exist", ERRCODE_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST },
-{ "dependent_objects_still_exist", ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST },
-{ "invalid_transaction_termination", ERRCODE_INVALID_TRANSACTION_TERMINATION },
-{ "sql_routine_exception", ERRCODE_SQL_ROUTINE_EXCEPTION },
-{ "function_executed_no_return_statement", ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT },
-{ "modifying_sql_data_not_permitted", ERRCODE_S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED },
-{ "prohibited_sql_statement_attempted", ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED },
-{ "reading_sql_data_not_permitted", ERRCODE_S_R_E_READING_SQL_DATA_NOT_PERMITTED },
-{ "invalid_cursor_name", ERRCODE_INVALID_CURSOR_NAME },
-{ "external_routine_exception", ERRCODE_EXTERNAL_ROUTINE_EXCEPTION },
-{ "containing_sql_not_permitted", ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED },
-{ "modifying_sql_data_not_permitted", ERRCODE_E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED },
-{ "prohibited_sql_statement_attempted", ERRCODE_E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED },
-{ "reading_sql_data_not_permitted", ERRCODE_E_R_E_READING_SQL_DATA_NOT_PERMITTED },
-{ "external_routine_invocation_exception", ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION },
-{ "invalid_sqlstate_returned", ERRCODE_E_R_I_E_INVALID_SQLSTATE_RETURNED },
-{ "null_value_not_allowed", ERRCODE_E_R_I_E_NULL_VALUE_NOT_ALLOWED },
-{ "trigger_protocol_violated", ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED },
-{ "srf_protocol_violated", ERRCODE_E_R_I_E_SRF_PROTOCOL_VIOLATED },
-{ "savepoint_exception", ERRCODE_SAVEPOINT_EXCEPTION },
-{ "invalid_savepoint_specification", ERRCODE_S_E_INVALID_SPECIFICATION },
-{ "invalid_catalog_name", ERRCODE_INVALID_CATALOG_NAME },
-{ "invalid_schema_name", ERRCODE_INVALID_SCHEMA_NAME },
-{ "transaction_rollback", ERRCODE_TRANSACTION_ROLLBACK },
-{ "transaction_integrity_constraint_violation", ERRCODE_T_R_INTEGRITY_CONSTRAINT_VIOLATION },
-{ "serialization_failure", ERRCODE_T_R_SERIALIZATION_FAILURE },
-{ "statement_completion_unknown", ERRCODE_T_R_STATEMENT_COMPLETION_UNKNOWN },
-{ "deadlock_detected", ERRCODE_T_R_DEADLOCK_DETECTED },
-{ "syntax_error_or_access_rule_violation", ERRCODE_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION },
-{ "syntax_error", ERRCODE_SYNTAX_ERROR },
-{ "insufficient_privilege", ERRCODE_INSUFFICIENT_PRIVILEGE },
-{ "cannot_coerce", ERRCODE_CANNOT_COERCE },
-{ "grouping_error", ERRCODE_GROUPING_ERROR },
-{ "invalid_foreign_key", ERRCODE_INVALID_FOREIGN_KEY },
-{ "invalid_name", ERRCODE_INVALID_NAME },
-{ "name_too_long", ERRCODE_NAME_TOO_LONG },
-{ "reserved_name", ERRCODE_RESERVED_NAME },
-{ "datatype_mismatch", ERRCODE_DATATYPE_MISMATCH },
-{ "indeterminate_datatype", ERRCODE_INDETERMINATE_DATATYPE },
-{ "wrong_object_type", ERRCODE_WRONG_OBJECT_TYPE },
-{ "undefined_column", ERRCODE_UNDEFINED_COLUMN },
-{ "undefined_cursor", ERRCODE_UNDEFINED_CURSOR },
-{ "undefined_database", ERRCODE_UNDEFINED_DATABASE },
-{ "undefined_function", ERRCODE_UNDEFINED_FUNCTION },
-{ "undefined_pstatement", ERRCODE_UNDEFINED_PSTATEMENT },
-{ "undefined_schema", ERRCODE_UNDEFINED_SCHEMA },
-{ "undefined_table", ERRCODE_UNDEFINED_TABLE },
-{ "undefined_parameter", ERRCODE_UNDEFINED_PARAMETER },
-{ "undefined_object", ERRCODE_UNDEFINED_OBJECT },
-{ "duplicate_column", ERRCODE_DUPLICATE_COLUMN },
-{ "duplicate_cursor", ERRCODE_DUPLICATE_CURSOR },
-{ "duplicate_database", ERRCODE_DUPLICATE_DATABASE },
-{ "duplicate_function", ERRCODE_DUPLICATE_FUNCTION },
-{ "duplicate_prepared_statement", ERRCODE_DUPLICATE_PSTATEMENT },
-{ "duplicate_schema", ERRCODE_DUPLICATE_SCHEMA },
-{ "duplicate_table", ERRCODE_DUPLICATE_TABLE },
-{ "duplicate_alias", ERRCODE_DUPLICATE_ALIAS },
-{ "duplicate_object", ERRCODE_DUPLICATE_OBJECT },
-{ "ambiguous_column", ERRCODE_AMBIGUOUS_COLUMN },
-{ "ambiguous_function", ERRCODE_AMBIGUOUS_FUNCTION },
-{ "ambiguous_parameter", ERRCODE_AMBIGUOUS_PARAMETER },
-{ "ambiguous_alias", ERRCODE_AMBIGUOUS_ALIAS },
-{ "invalid_column_reference", ERRCODE_INVALID_COLUMN_REFERENCE },
-{ "invalid_column_definition", ERRCODE_INVALID_COLUMN_DEFINITION },
-{ "invalid_cursor_definition", ERRCODE_INVALID_CURSOR_DEFINITION },
-{ "invalid_database_definition", ERRCODE_INVALID_DATABASE_DEFINITION },
-{ "invalid_function_definition", ERRCODE_INVALID_FUNCTION_DEFINITION },
-{ "invalid_prepared_statement_definition", ERRCODE_INVALID_PSTATEMENT_DEFINITION },
-{ "invalid_schema_definition", ERRCODE_INVALID_SCHEMA_DEFINITION },
-{ "invalid_table_definition", ERRCODE_INVALID_TABLE_DEFINITION },
-{ "invalid_object_definition", ERRCODE_INVALID_OBJECT_DEFINITION },
-{ "with_check_option_violation", ERRCODE_WITH_CHECK_OPTION_VIOLATION },
-{ "insufficient_resources", ERRCODE_INSUFFICIENT_RESOURCES },
-{ "disk_full", ERRCODE_DISK_FULL },
-{ "out_of_memory", ERRCODE_OUT_OF_MEMORY },
-{ "too_many_connections", ERRCODE_TOO_MANY_CONNECTIONS },
-{ "program_limit_exceeded", ERRCODE_PROGRAM_LIMIT_EXCEEDED },
-{ "statement_too_complex", ERRCODE_STATEMENT_TOO_COMPLEX },
-{ "too_many_columns", ERRCODE_TOO_MANY_COLUMNS },
-{ "too_many_arguments", ERRCODE_TOO_MANY_ARGUMENTS },
-{ "object_not_in_prerequisite_state", ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE },
-{ "object_in_use", ERRCODE_OBJECT_IN_USE },
-{ "cant_change_runtime_param", ERRCODE_CANT_CHANGE_RUNTIME_PARAM },
-{ "operator_intervention", ERRCODE_OPERATOR_INTERVENTION },
-{ "query_canceled", ERRCODE_QUERY_CANCELED },
-{ "admin_shutdown", ERRCODE_ADMIN_SHUTDOWN },
-{ "crash_shutdown", ERRCODE_CRASH_SHUTDOWN },
-{ "cannot_connect_now", ERRCODE_CANNOT_CONNECT_NOW },
-{ "io_error", ERRCODE_IO_ERROR },
-{ "undefined_file", ERRCODE_UNDEFINED_FILE },
-{ "duplicate_file", ERRCODE_DUPLICATE_FILE },
-{ "config_file_error", ERRCODE_CONFIG_FILE_ERROR },
-{ "lock_file_exists", ERRCODE_LOCK_FILE_EXISTS },
-{ "plpgsql_error", ERRCODE_PLPGSQL_ERROR },
-{ "raise_exception", ERRCODE_RAISE_EXCEPTION },
-{ "internal_error", ERRCODE_INTERNAL_ERROR },
-{ "data_corrupted", ERRCODE_DATA_CORRUPTED },
-{ "index_corrupted", ERRCODE_INDEX_CORRUPTED },
+{
+ "sql_statement_not_yet_complete", ERRCODE_SQL_STATEMENT_NOT_YET_COMPLETE
+},
+
+{
+ "connection_exception", ERRCODE_CONNECTION_EXCEPTION
+},
+
+{
+ "connection_does_not_exist", ERRCODE_CONNECTION_DOES_NOT_EXIST
+},
+
+{
+ "connection_failure", ERRCODE_CONNECTION_FAILURE
+},
+
+{
+ "sqlclient_unable_to_establish_sqlconnection", ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION
+},
+
+{
+ "sqlserver_rejected_establishment_of_sqlconnection", ERRCODE_SQLSERVER_REJECTED_ESTABLISHMENT_OF_SQLCONNECTION
+},
+
+{
+ "transaction_resolution_unknown", ERRCODE_TRANSACTION_RESOLUTION_UNKNOWN
+},
+
+{
+ "protocol_violation", ERRCODE_PROTOCOL_VIOLATION
+},
+
+{
+ "triggered_action_exception", ERRCODE_TRIGGERED_ACTION_EXCEPTION
+},
+
+{
+ "feature_not_supported", ERRCODE_FEATURE_NOT_SUPPORTED
+},
+
+{
+ "invalid_transaction_initiation", ERRCODE_INVALID_TRANSACTION_INITIATION
+},
+
+{
+ "locator_exception", ERRCODE_LOCATOR_EXCEPTION
+},
+
+{
+ "invalid_locator_specification", ERRCODE_L_E_INVALID_SPECIFICATION
+},
+
+{
+ "invalid_grantor", ERRCODE_INVALID_GRANTOR
+},
+
+{
+ "invalid_grant_operation", ERRCODE_INVALID_GRANT_OPERATION
+},
+
+{
+ "invalid_role_specification", ERRCODE_INVALID_ROLE_SPECIFICATION
+},
+
+{
+ "cardinality_violation", ERRCODE_CARDINALITY_VIOLATION
+},
+
+{
+ "data_exception", ERRCODE_DATA_EXCEPTION
+},
+
+{
+ "array_element_error", ERRCODE_ARRAY_ELEMENT_ERROR
+},
+
+{
+ "array_subscript_error", ERRCODE_ARRAY_SUBSCRIPT_ERROR
+},
+
+{
+ "character_not_in_repertoire", ERRCODE_CHARACTER_NOT_IN_REPERTOIRE
+},
+
+{
+ "datetime_field_overflow", ERRCODE_DATETIME_FIELD_OVERFLOW
+},
+
+{
+ "datetime_value_out_of_range", ERRCODE_DATETIME_VALUE_OUT_OF_RANGE
+},
+
+{
+ "division_by_zero", ERRCODE_DIVISION_BY_ZERO
+},
+
+{
+ "error_in_assignment", ERRCODE_ERROR_IN_ASSIGNMENT
+},
+
+{
+ "escape_character_conflict", ERRCODE_ESCAPE_CHARACTER_CONFLICT
+},
+
+{
+ "indicator_overflow", ERRCODE_INDICATOR_OVERFLOW
+},
+
+{
+ "interval_field_overflow", ERRCODE_INTERVAL_FIELD_OVERFLOW
+},
+
+{
+ "invalid_argument_for_logarithm", ERRCODE_INVALID_ARGUMENT_FOR_LOG
+},
+
+{
+ "invalid_argument_for_power_function", ERRCODE_INVALID_ARGUMENT_FOR_POWER_FUNCTION
+},
+
+{
+ "invalid_argument_for_width_bucket_function", ERRCODE_INVALID_ARGUMENT_FOR_WIDTH_BUCKET_FUNCTION
+},
+
+{
+ "invalid_character_value_for_cast", ERRCODE_INVALID_CHARACTER_VALUE_FOR_CAST
+},
+
+{
+ "invalid_datetime_format", ERRCODE_INVALID_DATETIME_FORMAT
+},
+
+{
+ "invalid_escape_character", ERRCODE_INVALID_ESCAPE_CHARACTER
+},
+
+{
+ "invalid_escape_octet", ERRCODE_INVALID_ESCAPE_OCTET
+},
+
+{
+ "invalid_escape_sequence", ERRCODE_INVALID_ESCAPE_SEQUENCE
+},
+
+{
+ "invalid_indicator_parameter_value", ERRCODE_INVALID_INDICATOR_PARAMETER_VALUE
+},
+
+{
+ "invalid_limit_value", ERRCODE_INVALID_LIMIT_VALUE
+},
+
+{
+ "invalid_parameter_value", ERRCODE_INVALID_PARAMETER_VALUE
+},
+
+{
+ "invalid_regular_expression", ERRCODE_INVALID_REGULAR_EXPRESSION
+},
+
+{
+ "invalid_time_zone_displacement_value", ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE
+},
+
+{
+ "invalid_use_of_escape_character", ERRCODE_INVALID_USE_OF_ESCAPE_CHARACTER
+},
+
+{
+ "most_specific_type_mismatch", ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH
+},
+
+{
+ "null_value_not_allowed", ERRCODE_NULL_VALUE_NOT_ALLOWED
+},
+
+{
+ "null_value_no_indicator_parameter", ERRCODE_NULL_VALUE_NO_INDICATOR_PARAMETER
+},
+
+{
+ "numeric_value_out_of_range", ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE
+},
+
+{
+ "string_data_length_mismatch", ERRCODE_STRING_DATA_LENGTH_MISMATCH
+},
+
+{
+ "string_data_right_truncation", ERRCODE_STRING_DATA_RIGHT_TRUNCATION
+},
+
+{
+ "substring_error", ERRCODE_SUBSTRING_ERROR
+},
+
+{
+ "trim_error", ERRCODE_TRIM_ERROR
+},
+
+{
+ "unterminated_c_string", ERRCODE_UNTERMINATED_C_STRING
+},
+
+{
+ "zero_length_character_string", ERRCODE_ZERO_LENGTH_CHARACTER_STRING
+},
+
+{
+ "floating_point_exception", ERRCODE_FLOATING_POINT_EXCEPTION
+},
+
+{
+ "invalid_text_representation", ERRCODE_INVALID_TEXT_REPRESENTATION
+},
+
+{
+ "invalid_binary_representation", ERRCODE_INVALID_BINARY_REPRESENTATION
+},
+
+{
+ "bad_copy_file_format", ERRCODE_BAD_COPY_FILE_FORMAT
+},
+
+{
+ "untranslatable_character", ERRCODE_UNTRANSLATABLE_CHARACTER
+},
+
+{
+ "integrity_constraint_violation", ERRCODE_INTEGRITY_CONSTRAINT_VIOLATION
+},
+
+{
+ "restrict_violation", ERRCODE_RESTRICT_VIOLATION
+},
+
+{
+ "not_null_violation", ERRCODE_NOT_NULL_VIOLATION
+},
+
+{
+ "foreign_key_violation", ERRCODE_FOREIGN_KEY_VIOLATION
+},
+
+{
+ "unique_violation", ERRCODE_UNIQUE_VIOLATION
+},
+
+{
+ "check_violation", ERRCODE_CHECK_VIOLATION
+},
+
+{
+ "invalid_cursor_state", ERRCODE_INVALID_CURSOR_STATE
+},
+
+{
+ "invalid_transaction_state", ERRCODE_INVALID_TRANSACTION_STATE
+},
+
+{
+ "active_sql_transaction", ERRCODE_ACTIVE_SQL_TRANSACTION
+},
+
+{
+ "branch_transaction_already_active", ERRCODE_BRANCH_TRANSACTION_ALREADY_ACTIVE
+},
+
+{
+ "held_cursor_requires_same_isolation_level", ERRCODE_HELD_CURSOR_REQUIRES_SAME_ISOLATION_LEVEL
+},
+
+{
+ "inappropriate_access_mode_for_branch_transaction", ERRCODE_INAPPROPRIATE_ACCESS_MODE_FOR_BRANCH_TRANSACTION
+},
+
+{
+ "inappropriate_isolation_level_for_branch_transaction", ERRCODE_INAPPROPRIATE_ISOLATION_LEVEL_FOR_BRANCH_TRANSACTION
+},
+
+{
+ "no_active_sql_transaction_for_branch_transaction", ERRCODE_NO_ACTIVE_SQL_TRANSACTION_FOR_BRANCH_TRANSACTION
+},
+
+{
+ "read_only_sql_transaction", ERRCODE_READ_ONLY_SQL_TRANSACTION
+},
+
+{
+ "schema_and_data_statement_mixing_not_supported", ERRCODE_SCHEMA_AND_DATA_STATEMENT_MIXING_NOT_SUPPORTED
+},
+
+{
+ "no_active_sql_transaction", ERRCODE_NO_ACTIVE_SQL_TRANSACTION
+},
+
+{
+ "in_failed_sql_transaction", ERRCODE_IN_FAILED_SQL_TRANSACTION
+},
+
+{
+ "invalid_sql_statement_name", ERRCODE_INVALID_SQL_STATEMENT_NAME
+},
+
+{
+ "triggered_data_change_violation", ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION
+},
+
+{
+ "invalid_authorization_specification", ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION
+},
+
+{
+ "dependent_privilege_descriptors_still_exist", ERRCODE_DEPENDENT_PRIVILEGE_DESCRIPTORS_STILL_EXIST
+},
+
+{
+ "dependent_objects_still_exist", ERRCODE_DEPENDENT_OBJECTS_STILL_EXIST
+},
+
+{
+ "invalid_transaction_termination", ERRCODE_INVALID_TRANSACTION_TERMINATION
+},
+
+{
+ "sql_routine_exception", ERRCODE_SQL_ROUTINE_EXCEPTION
+},
+
+{
+ "function_executed_no_return_statement", ERRCODE_S_R_E_FUNCTION_EXECUTED_NO_RETURN_STATEMENT
+},
+
+{
+ "modifying_sql_data_not_permitted", ERRCODE_S_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED
+},
+
+{
+ "prohibited_sql_statement_attempted", ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED
+},
+
+{
+ "reading_sql_data_not_permitted", ERRCODE_S_R_E_READING_SQL_DATA_NOT_PERMITTED
+},
+
+{
+ "invalid_cursor_name", ERRCODE_INVALID_CURSOR_NAME
+},
+
+{
+ "external_routine_exception", ERRCODE_EXTERNAL_ROUTINE_EXCEPTION
+},
+
+{
+ "containing_sql_not_permitted", ERRCODE_E_R_E_CONTAINING_SQL_NOT_PERMITTED
+},
+
+{
+ "modifying_sql_data_not_permitted", ERRCODE_E_R_E_MODIFYING_SQL_DATA_NOT_PERMITTED
+},
+
+{
+ "prohibited_sql_statement_attempted", ERRCODE_E_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED
+},
+
+{
+ "reading_sql_data_not_permitted", ERRCODE_E_R_E_READING_SQL_DATA_NOT_PERMITTED
+},
+
+{
+ "external_routine_invocation_exception", ERRCODE_EXTERNAL_ROUTINE_INVOCATION_EXCEPTION
+},
+
+{
+ "invalid_sqlstate_returned", ERRCODE_E_R_I_E_INVALID_SQLSTATE_RETURNED
+},
+
+{
+ "null_value_not_allowed", ERRCODE_E_R_I_E_NULL_VALUE_NOT_ALLOWED
+},
+
+{
+ "trigger_protocol_violated", ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED
+},
+
+{
+ "srf_protocol_violated", ERRCODE_E_R_I_E_SRF_PROTOCOL_VIOLATED
+},
+
+{
+ "savepoint_exception", ERRCODE_SAVEPOINT_EXCEPTION
+},
+
+{
+ "invalid_savepoint_specification", ERRCODE_S_E_INVALID_SPECIFICATION
+},
+
+{
+ "invalid_catalog_name", ERRCODE_INVALID_CATALOG_NAME
+},
+
+{
+ "invalid_schema_name", ERRCODE_INVALID_SCHEMA_NAME
+},
+
+{
+ "transaction_rollback", ERRCODE_TRANSACTION_ROLLBACK
+},
+
+{
+ "transaction_integrity_constraint_violation", ERRCODE_T_R_INTEGRITY_CONSTRAINT_VIOLATION
+},
+
+{
+ "serialization_failure", ERRCODE_T_R_SERIALIZATION_FAILURE
+},
+
+{
+ "statement_completion_unknown", ERRCODE_T_R_STATEMENT_COMPLETION_UNKNOWN
+},
+
+{
+ "deadlock_detected", ERRCODE_T_R_DEADLOCK_DETECTED
+},
+
+{
+ "syntax_error_or_access_rule_violation", ERRCODE_SYNTAX_ERROR_OR_ACCESS_RULE_VIOLATION
+},
+
+{
+ "syntax_error", ERRCODE_SYNTAX_ERROR
+},
+
+{
+ "insufficient_privilege", ERRCODE_INSUFFICIENT_PRIVILEGE
+},
+
+{
+ "cannot_coerce", ERRCODE_CANNOT_COERCE
+},
+
+{
+ "grouping_error", ERRCODE_GROUPING_ERROR
+},
+
+{
+ "invalid_foreign_key", ERRCODE_INVALID_FOREIGN_KEY
+},
+
+{
+ "invalid_name", ERRCODE_INVALID_NAME
+},
+
+{
+ "name_too_long", ERRCODE_NAME_TOO_LONG
+},
+
+{
+ "reserved_name", ERRCODE_RESERVED_NAME
+},
+
+{
+ "datatype_mismatch", ERRCODE_DATATYPE_MISMATCH
+},
+
+{
+ "indeterminate_datatype", ERRCODE_INDETERMINATE_DATATYPE
+},
+
+{
+ "wrong_object_type", ERRCODE_WRONG_OBJECT_TYPE
+},
+
+{
+ "undefined_column", ERRCODE_UNDEFINED_COLUMN
+},
+
+{
+ "undefined_cursor", ERRCODE_UNDEFINED_CURSOR
+},
+
+{
+ "undefined_database", ERRCODE_UNDEFINED_DATABASE
+},
+
+{
+ "undefined_function", ERRCODE_UNDEFINED_FUNCTION
+},
+
+{
+ "undefined_pstatement", ERRCODE_UNDEFINED_PSTATEMENT
+},
+
+{
+ "undefined_schema", ERRCODE_UNDEFINED_SCHEMA
+},
+
+{
+ "undefined_table", ERRCODE_UNDEFINED_TABLE
+},
+
+{
+ "undefined_parameter", ERRCODE_UNDEFINED_PARAMETER
+},
+
+{
+ "undefined_object", ERRCODE_UNDEFINED_OBJECT
+},
+
+{
+ "duplicate_column", ERRCODE_DUPLICATE_COLUMN
+},
+
+{
+ "duplicate_cursor", ERRCODE_DUPLICATE_CURSOR
+},
+
+{
+ "duplicate_database", ERRCODE_DUPLICATE_DATABASE
+},
+
+{
+ "duplicate_function", ERRCODE_DUPLICATE_FUNCTION
+},
+
+{
+ "duplicate_prepared_statement", ERRCODE_DUPLICATE_PSTATEMENT
+},
+
+{
+ "duplicate_schema", ERRCODE_DUPLICATE_SCHEMA
+},
+
+{
+ "duplicate_table", ERRCODE_DUPLICATE_TABLE
+},
+
+{
+ "duplicate_alias", ERRCODE_DUPLICATE_ALIAS
+},
+
+{
+ "duplicate_object", ERRCODE_DUPLICATE_OBJECT
+},
+
+{
+ "ambiguous_column", ERRCODE_AMBIGUOUS_COLUMN
+},
+
+{
+ "ambiguous_function", ERRCODE_AMBIGUOUS_FUNCTION
+},
+
+{
+ "ambiguous_parameter", ERRCODE_AMBIGUOUS_PARAMETER
+},
+
+{
+ "ambiguous_alias", ERRCODE_AMBIGUOUS_ALIAS
+},
+
+{
+ "invalid_column_reference", ERRCODE_INVALID_COLUMN_REFERENCE
+},
+
+{
+ "invalid_column_definition", ERRCODE_INVALID_COLUMN_DEFINITION
+},
+
+{
+ "invalid_cursor_definition", ERRCODE_INVALID_CURSOR_DEFINITION
+},
+
+{
+ "invalid_database_definition", ERRCODE_INVALID_DATABASE_DEFINITION
+},
+
+{
+ "invalid_function_definition", ERRCODE_INVALID_FUNCTION_DEFINITION
+},
+
+{
+ "invalid_prepared_statement_definition", ERRCODE_INVALID_PSTATEMENT_DEFINITION
+},
+
+{
+ "invalid_schema_definition", ERRCODE_INVALID_SCHEMA_DEFINITION
+},
+
+{
+ "invalid_table_definition", ERRCODE_INVALID_TABLE_DEFINITION
+},
+
+{
+ "invalid_object_definition", ERRCODE_INVALID_OBJECT_DEFINITION
+},
+
+{
+ "with_check_option_violation", ERRCODE_WITH_CHECK_OPTION_VIOLATION
+},
+
+{
+ "insufficient_resources", ERRCODE_INSUFFICIENT_RESOURCES
+},
+
+{
+ "disk_full", ERRCODE_DISK_FULL
+},
+
+{
+ "out_of_memory", ERRCODE_OUT_OF_MEMORY
+},
+
+{
+ "too_many_connections", ERRCODE_TOO_MANY_CONNECTIONS
+},
+
+{
+ "program_limit_exceeded", ERRCODE_PROGRAM_LIMIT_EXCEEDED
+},
+
+{
+ "statement_too_complex", ERRCODE_STATEMENT_TOO_COMPLEX
+},
+
+{
+ "too_many_columns", ERRCODE_TOO_MANY_COLUMNS
+},
+
+{
+ "too_many_arguments", ERRCODE_TOO_MANY_ARGUMENTS
+},
+
+{
+ "object_not_in_prerequisite_state", ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE
+},
+
+{
+ "object_in_use", ERRCODE_OBJECT_IN_USE
+},
+
+{
+ "cant_change_runtime_param", ERRCODE_CANT_CHANGE_RUNTIME_PARAM
+},
+
+{
+ "operator_intervention", ERRCODE_OPERATOR_INTERVENTION
+},
+
+{
+ "query_canceled", ERRCODE_QUERY_CANCELED
+},
+
+{
+ "admin_shutdown", ERRCODE_ADMIN_SHUTDOWN
+},
+
+{
+ "crash_shutdown", ERRCODE_CRASH_SHUTDOWN
+},
+
+{
+ "cannot_connect_now", ERRCODE_CANNOT_CONNECT_NOW
+},
+
+{
+ "io_error", ERRCODE_IO_ERROR
+},
+
+{
+ "undefined_file", ERRCODE_UNDEFINED_FILE
+},
+
+{
+ "duplicate_file", ERRCODE_DUPLICATE_FILE
+},
+
+{
+ "config_file_error", ERRCODE_CONFIG_FILE_ERROR
+},
+
+{
+ "lock_file_exists", ERRCODE_LOCK_FILE_EXISTS
+},
+
+{
+ "plpgsql_error", ERRCODE_PLPGSQL_ERROR
+},
+
+{
+ "raise_exception", ERRCODE_RAISE_EXCEPTION
+},
+
+{
+ "internal_error", ERRCODE_INTERNAL_ERROR
+},
+
+{
+ "data_corrupted", ERRCODE_DATA_CORRUPTED
+},
+
+{
+ "index_corrupted", ERRCODE_INDEX_CORRUPTED
+},
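Each entry reindented above pairs an exception condition name with its internal error-code macro; a table like this is typically scanned linearly by name (presumably by plpgsql_parse_err_condition, whose prototype appears further down). A minimal sketch of that kind of lookup, using a made-up miniature table with placeholder codes rather than real SQLSTATE values:

#include <stdio.h>
#include <string.h>

/* Miniature stand-in for the generated exception-label table. */
typedef struct
{
	const char *label;
	int			sqlerrstate;
} ExceptionLabelMap;

static const ExceptionLabelMap exception_label_map[] = {
	{"division_by_zero", 1001},	/* codes are placeholders, not real SQLSTATEs */
	{"unique_violation", 1002},
	{"raise_exception", 1003},
	{NULL, 0}					/* end marker */
};

/* Return the code for a condition name, or -1 if it is not known. */
static int
lookup_condition(const char *condname)
{
	for (int i = 0; exception_label_map[i].label != NULL; i++)
	{
		if (strcmp(exception_label_map[i].label, condname) == 0)
			return exception_label_map[i].sqlerrstate;
	}
	return -1;
}

int
main(void)
{
	printf("%d\n", lookup_condition("unique_violation"));		/* 1002 */
	printf("%d\n", lookup_condition("no_such_condition"));		/* -1 */
	return 0;
}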
* procedural language
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.51 2004/08/20 22:00:14 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/plpgsql/src/plpgsql.h,v 1.52 2004/08/29 05:07:01 momjian Exp $
*
* This software is copyrighted by Jan Wieck - Hamburg.
*
void *plan;
Oid *plan_argtypes;
/* fields for "simple expression" fast-path execution: */
- Expr *expr_simple_expr; /* NULL means not a simple expr */
+ Expr *expr_simple_expr; /* NULL means not a simple expr */
Oid expr_simple_type;
/* if expr is simple AND in use in current xact, these fields are set: */
ExprState *expr_simple_state;
typedef struct
{ /* List of WHEN clauses */
- int exceptions_alloc; /* XXX this oughta just be a List ... */
+ int exceptions_alloc; /* XXX this oughta just be a List
+ * ... */
int exceptions_used;
PLpgSQL_exception **exceptions;
} PLpgSQL_exceptions;
Oid funcOid;
/*
- * For a trigger function, the OID of the relation triggered on is part
- * of the hashkey --- we want to compile the trigger separately for each
- * relation it is used with, in case the rowtype is different. Zero if
- * not called as a trigger.
+ * For a trigger function, the OID of the relation triggered on is
+ * part of the hashkey --- we want to compile the trigger separately
+ * for each relation it is used with, in case the rowtype is
+ * different. Zero if not called as a trigger.
*/
Oid trigrelOid;
* ----------
*/
extern PLpgSQL_function *plpgsql_compile(FunctionCallInfo fcinfo,
- bool forValidator);
+ bool forValidator);
extern int plpgsql_parse_word(char *word);
extern int plpgsql_parse_dblword(char *word);
extern int plpgsql_parse_tripword(char *word);
extern PLpgSQL_type *plpgsql_parse_datatype(const char *string);
extern PLpgSQL_type *plpgsql_build_datatype(Oid typeOid, int32 typmod);
extern PLpgSQL_variable *plpgsql_build_variable(char *refname, int lineno,
- PLpgSQL_type *dtype,
- bool add2namespace);
+ PLpgSQL_type * dtype,
+ bool add2namespace);
extern PLpgSQL_condition *plpgsql_parse_err_condition(char *condname);
-extern void plpgsql_adddatum(PLpgSQL_datum *new);
+extern void plpgsql_adddatum(PLpgSQL_datum * new);
extern int plpgsql_add_initdatums(int **varnos);
extern void plpgsql_HashTableInit(void);
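The rewrapped comment above explains that a trigger function's hash key includes the OID of the relation it fired on, so the same function gets compiled separately per relation. A sketch of that idea with made-up names and a made-up hash (the real key has more fields and a different hash function):

#include <stdio.h>

typedef unsigned int Oid;

/*
 * Same function OID paired with different trigger relations yields
 * different keys, so each (function, relation) pair gets its own
 * compiled copy; trigrelOid is zero for a plain (non-trigger) call.
 */
typedef struct
{
	Oid			funcOid;
	Oid			trigrelOid;
} FuncHashKey;

static unsigned int
hash_key(const FuncHashKey *k)
{
	return (k->funcOid * 2654435761u) ^ k->trigrelOid;
}

int
main(void)
{
	FuncHashKey a = {4242, 0};		/* plain function call */
	FuncHashKey b = {4242, 16384};	/* same function, fired as a trigger */

	printf("%u %u\n", hash_key(&a), hash_key(&b));	/* different buckets expected */
	return 0;
}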
* MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.53 2004/08/05 03:10:29 joe Exp $
+ * $PostgreSQL: pgsql/src/pl/plpython/plpython.c,v 1.54 2004/08/29 05:07:01 momjian Exp $
*
*********************************************************************
*/
PLyTypeInput in;
PLyTypeOutput out;
int is_rowtype;
+
/*
- * is_rowtype can be:
- * -1 not known yet (initial state)
- * 0 scalar datatype
- * 1 rowtype
- * 2 rowtype, but I/O functions not set up yet
+ * is_rowtype can be: -1 not known yet (initial state) 0 scalar
+ * datatype 1 rowtype 2 rowtype, but I/O functions not set up yet
*/
} PLyTypeInfo;
{
PyObject_HEAD
/* HeapTuple *tuples; */
- PyObject *nrows; /* number of rows returned by query */
+ PyObject * nrows; /* number of rows returned by query */
PyObject *rows; /* data rows, or None if no data returned */
PyObject *status; /* query status, SPI_OK_*, or SPI_ERR_* */
} PLyResultObject;
static PyObject *PLy_procedure_call(PLyProcedure *, char *, PyObject *);
static PLyProcedure *PLy_procedure_get(FunctionCallInfo fcinfo,
- Oid tgreloid);
+ Oid tgreloid);
static PLyProcedure *PLy_procedure_create(FunctionCallInfo fcinfo,
Oid tgreloid,
HeapTuple trv;
proc = PLy_procedure_get(fcinfo,
- RelationGetRelid(tdata->tg_relation));
+ RelationGetRelid(tdata->tg_relation));
trv = PLy_trigger_handler(fcinfo, proc);
retval = PointerGetDatum(trv);
}
PG_TRY();
{
- plargs = PLy_trigger_build_args(fcinfo, proc, &rv);
- plrv = PLy_procedure_call(proc, "TD", plargs);
+ plargs = PLy_trigger_build_args(fcinfo, proc, &rv);
+ plrv = PLy_procedure_call(proc, "TD", plargs);
- Assert(plrv != NULL);
- Assert(!PLy_error_in_progress);
+ Assert(plrv != NULL);
+ Assert(!PLy_error_in_progress);
- /*
- * Disconnect from SPI manager
- */
- if (SPI_finish() != SPI_OK_FINISH)
- elog(ERROR, "SPI_finish failed");
+ /*
+ * Disconnect from SPI manager
+ */
+ if (SPI_finish() != SPI_OK_FINISH)
+ elog(ERROR, "SPI_finish failed");
- /*
- * return of None means we're happy with the tuple
- */
- if (plrv != Py_None)
- {
- char *srv;
+ /*
+ * return of None means we're happy with the tuple
+ */
+ if (plrv != Py_None)
+ {
+ char *srv;
- if (!PyString_Check(plrv))
- elog(ERROR, "expected trigger to return None or a String");
+ if (!PyString_Check(plrv))
+ elog(ERROR, "expected trigger to return None or a String");
- srv = PyString_AsString(plrv);
- if (pg_strcasecmp(srv, "SKIP") == 0)
- rv = NULL;
- else if (pg_strcasecmp(srv, "MODIFY") == 0)
- {
- TriggerData *tdata = (TriggerData *) fcinfo->context;
+ srv = PyString_AsString(plrv);
+ if (pg_strcasecmp(srv, "SKIP") == 0)
+ rv = NULL;
+ else if (pg_strcasecmp(srv, "MODIFY") == 0)
+ {
+ TriggerData *tdata = (TriggerData *) fcinfo->context;
- if ((TRIGGER_FIRED_BY_INSERT(tdata->tg_event)) ||
- (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event)))
- rv = PLy_modify_tuple(proc, plargs, tdata, rv);
- else
- elog(WARNING, "ignoring modified tuple in DELETE trigger");
- }
- else if (pg_strcasecmp(srv, "OK") != 0)
- {
- /*
- * hmmm, perhaps they only read the pltcl page, not a
- * surprising thing since i've written no documentation, so
- * accept a belated OK
- */
- elog(ERROR, "expected return to be \"SKIP\" or \"MODIFY\"");
+ if ((TRIGGER_FIRED_BY_INSERT(tdata->tg_event)) ||
+ (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event)))
+ rv = PLy_modify_tuple(proc, plargs, tdata, rv);
+ else
+ elog(WARNING, "ignoring modified tuple in DELETE trigger");
+ }
+ else if (pg_strcasecmp(srv, "OK") != 0)
+ {
+ /*
+ * hmmm, perhaps they only read the pltcl page, not a
+ * surprising thing since i've written no documentation,
+ * so accept a belated OK
+ */
+ elog(ERROR, "expected return to be \"SKIP\" or \"MODIFY\"");
+ }
}
}
- }
PG_CATCH();
{
Py_XDECREF(plargs);
PG_TRY();
{
- if ((plntup = PyDict_GetItemString(pltd, "new")) == NULL)
- elog(ERROR, "TD[\"new\"] deleted, unable to modify tuple");
- if (!PyDict_Check(plntup))
- elog(ERROR, "TD[\"new\"] is not a dictionary object");
- Py_INCREF(plntup);
+ if ((plntup = PyDict_GetItemString(pltd, "new")) == NULL)
+ elog(ERROR, "TD[\"new\"] deleted, unable to modify tuple");
+ if (!PyDict_Check(plntup))
+ elog(ERROR, "TD[\"new\"] is not a dictionary object");
+ Py_INCREF(plntup);
- plkeys = PyDict_Keys(plntup);
- natts = PyList_Size(plkeys);
+ plkeys = PyDict_Keys(plntup);
+ natts = PyList_Size(plkeys);
- modattrs = (int *) palloc(natts * sizeof(int));
- modvalues = (Datum *) palloc(natts * sizeof(Datum));
- modnulls = (char *) palloc(natts * sizeof(char));
+ modattrs = (int *) palloc(natts * sizeof(int));
+ modvalues = (Datum *) palloc(natts * sizeof(Datum));
+ modnulls = (char *) palloc(natts * sizeof(char));
- tupdesc = tdata->tg_relation->rd_att;
+ tupdesc = tdata->tg_relation->rd_att;
- for (i = 0; i < natts; i++)
- {
- char *src;
+ for (i = 0; i < natts; i++)
+ {
+ char *src;
- platt = PyList_GetItem(plkeys, i);
- if (!PyString_Check(platt))
- elog(ERROR, "attribute name is not a string");
- attn = SPI_fnumber(tupdesc, PyString_AsString(platt));
- if (attn == SPI_ERROR_NOATTRIBUTE)
- elog(ERROR, "invalid attribute \"%s\" in tuple",
- PyString_AsString(platt));
- atti = attn - 1;
+ platt = PyList_GetItem(plkeys, i);
+ if (!PyString_Check(platt))
+ elog(ERROR, "attribute name is not a string");
+ attn = SPI_fnumber(tupdesc, PyString_AsString(platt));
+ if (attn == SPI_ERROR_NOATTRIBUTE)
+ elog(ERROR, "invalid attribute \"%s\" in tuple",
+ PyString_AsString(platt));
+ atti = attn - 1;
- plval = PyDict_GetItem(plntup, platt);
- if (plval == NULL)
- elog(FATAL, "python interpreter is probably corrupted");
+ plval = PyDict_GetItem(plntup, platt);
+ if (plval == NULL)
+ elog(FATAL, "python interpreter is probably corrupted");
- Py_INCREF(plval);
+ Py_INCREF(plval);
- modattrs[i] = attn;
+ modattrs[i] = attn;
- if (plval != Py_None && !tupdesc->attrs[atti]->attisdropped)
- {
- plstr = PyObject_Str(plval);
- src = PyString_AsString(plstr);
+ if (plval != Py_None && !tupdesc->attrs[atti]->attisdropped)
+ {
+ plstr = PyObject_Str(plval);
+ src = PyString_AsString(plstr);
- modvalues[i] = FunctionCall3(&proc->result.out.r.atts[atti].typfunc,
- CStringGetDatum(src),
- ObjectIdGetDatum(proc->result.out.r.atts[atti].typioparam),
+ modvalues[i] = FunctionCall3(&proc->result.out.r.atts[atti].typfunc,
+ CStringGetDatum(src),
+ ObjectIdGetDatum(proc->result.out.r.atts[atti].typioparam),
Int32GetDatum(tupdesc->attrs[atti]->atttypmod));
- modnulls[i] = ' ';
+ modnulls[i] = ' ';
- Py_DECREF(plstr);
- plstr = NULL;
- }
- else
- {
- modvalues[i] = (Datum) 0;
- modnulls[i] = 'n';
- }
+ Py_DECREF(plstr);
+ plstr = NULL;
+ }
+ else
+ {
+ modvalues[i] = (Datum) 0;
+ modnulls[i] = 'n';
+ }
- Py_DECREF(plval);
- plval = NULL;
- }
+ Py_DECREF(plval);
+ plval = NULL;
+ }
- rtup = SPI_modifytuple(tdata->tg_relation, otup, natts,
- modattrs, modvalues, modnulls);
- if (rtup == NULL)
- elog(ERROR, "SPI_modifytuple failed -- error %d", SPI_result);
+ rtup = SPI_modifytuple(tdata->tg_relation, otup, natts,
+ modattrs, modvalues, modnulls);
+ if (rtup == NULL)
+ elog(ERROR, "SPI_modifytuple failed -- error %d", SPI_result);
}
PG_CATCH();
{
PG_TRY();
{
- pltdata = PyDict_New();
- if (!pltdata)
- PLy_elog(ERROR, "could not build arguments for trigger procedure");
+ pltdata = PyDict_New();
+ if (!pltdata)
+ PLy_elog(ERROR, "could not build arguments for trigger procedure");
- pltname = PyString_FromString(tdata->tg_trigger->tgname);
- PyDict_SetItemString(pltdata, "name", pltname);
- Py_DECREF(pltname);
+ pltname = PyString_FromString(tdata->tg_trigger->tgname);
+ PyDict_SetItemString(pltdata, "name", pltname);
+ Py_DECREF(pltname);
- stroid = DatumGetCString(DirectFunctionCall1(oidout,
+ stroid = DatumGetCString(DirectFunctionCall1(oidout,
ObjectIdGetDatum(tdata->tg_relation->rd_id)));
- pltrelid = PyString_FromString(stroid);
- PyDict_SetItemString(pltdata, "relid", pltrelid);
- Py_DECREF(pltrelid);
- pfree(stroid);
-
- if (TRIGGER_FIRED_BEFORE(tdata->tg_event))
- pltwhen = PyString_FromString("BEFORE");
- else if (TRIGGER_FIRED_AFTER(tdata->tg_event))
- pltwhen = PyString_FromString("AFTER");
- else
- {
- elog(ERROR, "unrecognized WHEN tg_event: %u", tdata->tg_event);
- pltwhen = NULL; /* keep compiler quiet */
- }
- PyDict_SetItemString(pltdata, "when", pltwhen);
- Py_DECREF(pltwhen);
-
- if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
- {
- pltlevel = PyString_FromString("ROW");
- PyDict_SetItemString(pltdata, "level", pltlevel);
- Py_DECREF(pltlevel);
+ pltrelid = PyString_FromString(stroid);
+ PyDict_SetItemString(pltdata, "relid", pltrelid);
+ Py_DECREF(pltrelid);
+ pfree(stroid);
+
+ if (TRIGGER_FIRED_BEFORE(tdata->tg_event))
+ pltwhen = PyString_FromString("BEFORE");
+ else if (TRIGGER_FIRED_AFTER(tdata->tg_event))
+ pltwhen = PyString_FromString("AFTER");
+ else
+ {
+ elog(ERROR, "unrecognized WHEN tg_event: %u", tdata->tg_event);
+ pltwhen = NULL; /* keep compiler quiet */
+ }
+ PyDict_SetItemString(pltdata, "when", pltwhen);
+ Py_DECREF(pltwhen);
- if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
+ if (TRIGGER_FIRED_FOR_ROW(tdata->tg_event))
{
- pltevent = PyString_FromString("INSERT");
+ pltlevel = PyString_FromString("ROW");
+ PyDict_SetItemString(pltdata, "level", pltlevel);
+ Py_DECREF(pltlevel);
- PyDict_SetItemString(pltdata, "old", Py_None);
- pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
- tdata->tg_relation->rd_att);
- PyDict_SetItemString(pltdata, "new", pytnew);
- Py_DECREF(pytnew);
- *rv = tdata->tg_trigtuple;
+ if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
+ {
+ pltevent = PyString_FromString("INSERT");
+
+ PyDict_SetItemString(pltdata, "old", Py_None);
+ pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
+ tdata->tg_relation->rd_att);
+ PyDict_SetItemString(pltdata, "new", pytnew);
+ Py_DECREF(pytnew);
+ *rv = tdata->tg_trigtuple;
+ }
+ else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event))
+ {
+ pltevent = PyString_FromString("DELETE");
+
+ PyDict_SetItemString(pltdata, "new", Py_None);
+ pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
+ tdata->tg_relation->rd_att);
+ PyDict_SetItemString(pltdata, "old", pytold);
+ Py_DECREF(pytold);
+ *rv = tdata->tg_trigtuple;
+ }
+ else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event))
+ {
+ pltevent = PyString_FromString("UPDATE");
+
+ pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_newtuple,
+ tdata->tg_relation->rd_att);
+ PyDict_SetItemString(pltdata, "new", pytnew);
+ Py_DECREF(pytnew);
+ pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
+ tdata->tg_relation->rd_att);
+ PyDict_SetItemString(pltdata, "old", pytold);
+ Py_DECREF(pytold);
+ *rv = tdata->tg_newtuple;
+ }
+ else
+ {
+ elog(ERROR, "unrecognized OP tg_event: %u", tdata->tg_event);
+ pltevent = NULL; /* keep compiler quiet */
+ }
+
+ PyDict_SetItemString(pltdata, "event", pltevent);
+ Py_DECREF(pltevent);
}
- else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event))
+ else if (TRIGGER_FIRED_FOR_STATEMENT(tdata->tg_event))
{
- pltevent = PyString_FromString("DELETE");
+ pltlevel = PyString_FromString("STATEMENT");
+ PyDict_SetItemString(pltdata, "level", pltlevel);
+ Py_DECREF(pltlevel);
+ PyDict_SetItemString(pltdata, "old", Py_None);
PyDict_SetItemString(pltdata, "new", Py_None);
- pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
- tdata->tg_relation->rd_att);
- PyDict_SetItemString(pltdata, "old", pytold);
- Py_DECREF(pytold);
- *rv = tdata->tg_trigtuple;
- }
- else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event))
- {
- pltevent = PyString_FromString("UPDATE");
-
- pytnew = PLyDict_FromTuple(&(proc->result), tdata->tg_newtuple,
- tdata->tg_relation->rd_att);
- PyDict_SetItemString(pltdata, "new", pytnew);
- Py_DECREF(pytnew);
- pytold = PLyDict_FromTuple(&(proc->result), tdata->tg_trigtuple,
- tdata->tg_relation->rd_att);
- PyDict_SetItemString(pltdata, "old", pytold);
- Py_DECREF(pytold);
- *rv = tdata->tg_newtuple;
+ *rv = NULL;
+
+ if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
+ pltevent = PyString_FromString("INSERT");
+ else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event))
+ pltevent = PyString_FromString("DELETE");
+ else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event))
+ pltevent = PyString_FromString("UPDATE");
+ else
+ {
+ elog(ERROR, "unrecognized OP tg_event: %u", tdata->tg_event);
+ pltevent = NULL; /* keep compiler quiet */
+ }
+
+ PyDict_SetItemString(pltdata, "event", pltevent);
+ Py_DECREF(pltevent);
}
else
- {
- elog(ERROR, "unrecognized OP tg_event: %u", tdata->tg_event);
- pltevent = NULL; /* keep compiler quiet */
- }
+ elog(ERROR, "unrecognized LEVEL tg_event: %u", tdata->tg_event);
- PyDict_SetItemString(pltdata, "event", pltevent);
- Py_DECREF(pltevent);
- }
- else if (TRIGGER_FIRED_FOR_STATEMENT(tdata->tg_event))
- {
- pltlevel = PyString_FromString("STATEMENT");
- PyDict_SetItemString(pltdata, "level", pltlevel);
- Py_DECREF(pltlevel);
-
- PyDict_SetItemString(pltdata, "old", Py_None);
- PyDict_SetItemString(pltdata, "new", Py_None);
- *rv = NULL;
-
- if (TRIGGER_FIRED_BY_INSERT(tdata->tg_event))
- pltevent = PyString_FromString("INSERT");
- else if (TRIGGER_FIRED_BY_DELETE(tdata->tg_event))
- pltevent = PyString_FromString("DELETE");
- else if (TRIGGER_FIRED_BY_UPDATE(tdata->tg_event))
- pltevent = PyString_FromString("UPDATE");
- else
+ if (tdata->tg_trigger->tgnargs)
{
- elog(ERROR, "unrecognized OP tg_event: %u", tdata->tg_event);
- pltevent = NULL; /* keep compiler quiet */
- }
-
- PyDict_SetItemString(pltdata, "event", pltevent);
- Py_DECREF(pltevent);
- }
- else
- elog(ERROR, "unrecognized LEVEL tg_event: %u", tdata->tg_event);
+ /*
+ * all strings...
+ */
+ int i;
+ PyObject *pltarg;
- if (tdata->tg_trigger->tgnargs)
- {
- /*
- * all strings...
- */
- int i;
- PyObject *pltarg;
+ pltargs = PyList_New(tdata->tg_trigger->tgnargs);
+ for (i = 0; i < tdata->tg_trigger->tgnargs; i++)
+ {
+ pltarg = PyString_FromString(tdata->tg_trigger->tgargs[i]);
- pltargs = PyList_New(tdata->tg_trigger->tgnargs);
- for (i = 0; i < tdata->tg_trigger->tgnargs; i++)
+ /*
+				 * PyList_SetItem steals this reference, so don't Py_DECREF it
+ */
+ PyList_SetItem(pltargs, i, pltarg);
+ }
+ }
+ else
{
- pltarg = PyString_FromString(tdata->tg_trigger->tgargs[i]);
-
- /*
- * stolen, don't Py_DECREF
- */
- PyList_SetItem(pltargs, i, pltarg);
+ Py_INCREF(Py_None);
+ pltargs = Py_None;
}
- }
- else
- {
- Py_INCREF(Py_None);
- pltargs = Py_None;
- }
- PyDict_SetItemString(pltdata, "args", pltargs);
- Py_DECREF(pltargs);
+ PyDict_SetItemString(pltdata, "args", pltargs);
+ Py_DECREF(pltargs);
}
PG_CATCH();
{
PG_TRY();
{
- plargs = PLy_function_build_args(fcinfo, proc);
- plrv = PLy_procedure_call(proc, "args", plargs);
+ plargs = PLy_function_build_args(fcinfo, proc);
+ plrv = PLy_procedure_call(proc, "args", plargs);
- Assert(plrv != NULL);
- Assert(!PLy_error_in_progress);
+ Assert(plrv != NULL);
+ Assert(!PLy_error_in_progress);
- /*
- * Disconnect from SPI manager and then create the return values datum
- * (if the input function does a palloc for it this must not be
- * allocated in the SPI memory context because SPI_finish would free
- * it).
- */
- if (SPI_finish() != SPI_OK_FINISH)
- elog(ERROR, "SPI_finish failed");
+ /*
+ * Disconnect from SPI manager and then create the return values
+ * datum (if the input function does a palloc for it this must not
+ * be allocated in the SPI memory context because SPI_finish would
+ * free it).
+ */
+ if (SPI_finish() != SPI_OK_FINISH)
+ elog(ERROR, "SPI_finish failed");
- /*
- * convert the python PyObject to a postgresql Datum
- */
- if (plrv == Py_None)
- {
- fcinfo->isnull = true;
- rv = (Datum) NULL;
- }
- else
- {
- fcinfo->isnull = false;
- plrv_so = PyObject_Str(plrv);
- plrv_sc = PyString_AsString(plrv_so);
- rv = FunctionCall3(&proc->result.out.d.typfunc,
- PointerGetDatum(plrv_sc),
- ObjectIdGetDatum(proc->result.out.d.typioparam),
- Int32GetDatum(-1));
- }
+ /*
+ * convert the python PyObject to a postgresql Datum
+ */
+ if (plrv == Py_None)
+ {
+ fcinfo->isnull = true;
+ rv = (Datum) NULL;
+ }
+ else
+ {
+ fcinfo->isnull = false;
+ plrv_so = PyObject_Str(plrv);
+ plrv_sc = PyString_AsString(plrv_so);
+ rv = FunctionCall3(&proc->result.out.d.typfunc,
+ PointerGetDatum(plrv_sc),
+ ObjectIdGetDatum(proc->result.out.d.typioparam),
+ Int32GetDatum(-1));
+ }
}
PG_CATCH();
PLy_last_procedure = current;
/*
- * If there was an error in a PG callback, propagate that
- * no matter what Python claims about its success.
+ * If there was an error in a PG callback, propagate that no matter
+ * what Python claims about its success.
*/
if (PLy_error_in_progress)
{
- ErrorData *edata = PLy_error_in_progress;
+ ErrorData *edata = PLy_error_in_progress;
PLy_error_in_progress = NULL;
ReThrowError(edata);
PG_TRY();
{
- args = PyList_New(proc->nargs);
- for (i = 0; i < proc->nargs; i++)
- {
- if (proc->args[i].is_rowtype > 0)
+ args = PyList_New(proc->nargs);
+ for (i = 0; i < proc->nargs; i++)
{
- if (fcinfo->argnull[i])
- arg = NULL;
- else
+ if (proc->args[i].is_rowtype > 0)
{
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup;
-
- td = DatumGetHeapTupleHeader(fcinfo->arg[i]);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
-
- /* Set up I/O funcs if not done yet */
- if (proc->args[i].is_rowtype != 1)
- PLy_input_tuple_funcs(&(proc->args[i]), tupdesc);
-
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- tmptup.t_data = td;
-
- arg = PLyDict_FromTuple(&(proc->args[i]), &tmptup, tupdesc);
+ if (fcinfo->argnull[i])
+ arg = NULL;
+ else
+ {
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup;
+
+ td = DatumGetHeapTupleHeader(fcinfo->arg[i]);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+
+ /* Set up I/O funcs if not done yet */
+ if (proc->args[i].is_rowtype != 1)
+ PLy_input_tuple_funcs(&(proc->args[i]), tupdesc);
+
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ tmptup.t_data = td;
+
+ arg = PLyDict_FromTuple(&(proc->args[i]), &tmptup, tupdesc);
+ }
}
- }
- else
- {
- if (fcinfo->argnull[i])
- arg = NULL;
else
{
- char *ct;
- Datum dt;
-
- dt = FunctionCall3(&(proc->args[i].in.d.typfunc),
- fcinfo->arg[i],
- ObjectIdGetDatum(proc->args[i].in.d.typioparam),
- Int32GetDatum(-1));
- ct = DatumGetCString(dt);
- arg = (proc->args[i].in.d.func) (ct);
- pfree(ct);
+ if (fcinfo->argnull[i])
+ arg = NULL;
+ else
+ {
+ char *ct;
+ Datum dt;
+
+ dt = FunctionCall3(&(proc->args[i].in.d.typfunc),
+ fcinfo->arg[i],
+ ObjectIdGetDatum(proc->args[i].in.d.typioparam),
+ Int32GetDatum(-1));
+ ct = DatumGetCString(dt);
+ arg = (proc->args[i].in.d.func) (ct);
+ pfree(ct);
+ }
}
- }
- if (arg == NULL)
- {
- Py_INCREF(Py_None);
- arg = Py_None;
- }
+ if (arg == NULL)
+ {
+ Py_INCREF(Py_None);
+ arg = Py_None;
+ }
- /*
- * FIXME -- error check this
- */
- PyList_SetItem(args, i, arg);
- }
+ /*
+ * FIXME -- error check this
+ */
+ PyList_SetItem(args, i, arg);
+ }
}
PG_CATCH();
{
*/
/* PLy_procedure_get: returns a cached PLyProcedure, or creates, stores and
- * returns a new PLyProcedure. fcinfo is the call info, tgreloid is the
+ * returns a new PLyProcedure. fcinfo is the call info, tgreloid is the
* relation OID when calling a trigger, or InvalidOid (zero) for ordinary
* function calls.
*/
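
As an aside to the comment above: the hunks that follow only re-wrap the lookup-or-compile code it describes, but the idiom itself shows through in the PyDict/PyCObject calls further down (PyCObject_FromVoidPtr, PyDict_SetItemString on PLy_procedure_cache). A minimal sketch of that idiom, assuming a Python 2.x embedding; ProcDesc, compile_proc and lookup_or_compile are hypothetical names, not part of this patch:

#include <stdlib.h>
#include <Python.h>                    /* Python 2.x C API, as used here */

typedef struct
{
	const char *src;                   /* stand-in for the real fields */
} ProcDesc;

static PyObject *proc_cache;           /* created once at interpreter init */

/* hypothetical: pretend to compile a procedure body for "key" */
static ProcDesc *
compile_proc(const char *key)
{
	ProcDesc   *proc = malloc(sizeof(ProcDesc));

	proc->src = key;
	return proc;
}

static ProcDesc *
lookup_or_compile(const char *key)
{
	PyObject   *entry = PyDict_GetItemString(proc_cache, key);	/* borrowed ref */

	if (entry != NULL)
		return (ProcDesc *) PyCObject_AsVoidPtr(entry);

	{
		ProcDesc   *proc = compile_proc(key);
		PyObject   *me = PyCObject_FromVoidPtr(proc, NULL);

		PyDict_SetItemString(proc_cache, key, me);	/* does not steal "me" */
		Py_DECREF(me);
		return proc;
	}
}

int
main(void)
{
	Py_Initialize();
	proc_cache = PyDict_New();
	lookup_or_compile("some_key");     /* arbitrary illustrative key */
	Py_Finalize();
	return 0;
}

The real routine also derives its cache key from the function OID and, for triggers, the relation OID, as the comment above notes; that bookkeeping is left out of the sketch.
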
PG_TRY();
{
- /*
- * get information required for output conversion of the return value,
- * but only if this isn't a trigger.
- */
- if (!CALLED_AS_TRIGGER(fcinfo))
- {
- HeapTuple rvTypeTup;
- Form_pg_type rvTypeStruct;
+ /*
+ * get information required for output conversion of the return
+ * value, but only if this isn't a trigger.
+ */
+ if (!CALLED_AS_TRIGGER(fcinfo))
+ {
+ HeapTuple rvTypeTup;
+ Form_pg_type rvTypeStruct;
- rvTypeTup = SearchSysCache(TYPEOID,
+ rvTypeTup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(procStruct->prorettype),
- 0, 0, 0);
- if (!HeapTupleIsValid(rvTypeTup))
- elog(ERROR, "cache lookup failed for type %u",
- procStruct->prorettype);
+ 0, 0, 0);
+ if (!HeapTupleIsValid(rvTypeTup))
+ elog(ERROR, "cache lookup failed for type %u",
+ procStruct->prorettype);
+
+ rvTypeStruct = (Form_pg_type) GETSTRUCT(rvTypeTup);
+ if (rvTypeStruct->typtype != 'c')
+ PLy_output_datum_func(&proc->result, rvTypeTup);
+ else
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("tuple return types are not supported yet")));
- rvTypeStruct = (Form_pg_type) GETSTRUCT(rvTypeTup);
- if (rvTypeStruct->typtype != 'c')
- PLy_output_datum_func(&proc->result, rvTypeTup);
+ ReleaseSysCache(rvTypeTup);
+ }
else
- ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("tuple return types are not supported yet")));
+ {
+ /*
+ * input/output conversion for trigger tuples. use the result
+ * TypeInfo variable to store the tuple conversion info.
+ */
+ TriggerData *tdata = (TriggerData *) fcinfo->context;
+
+ PLy_input_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att);
+ PLy_output_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att);
+ }
- ReleaseSysCache(rvTypeTup);
- }
- else
- {
/*
- * input/output conversion for trigger tuples. use the result
- * TypeInfo variable to store the tuple conversion info.
+ * now get information required for input conversion of the
+		 * procedure's arguments.
*/
- TriggerData *tdata = (TriggerData *) fcinfo->context;
-
- PLy_input_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att);
- PLy_output_tuple_funcs(&(proc->result), tdata->tg_relation->rd_att);
- }
-
- /*
- * now get information required for input conversion of the procedures
- * arguments.
- */
- proc->nargs = fcinfo->nargs;
- for (i = 0; i < fcinfo->nargs; i++)
- {
- HeapTuple argTypeTup;
- Form_pg_type argTypeStruct;
+ proc->nargs = fcinfo->nargs;
+ for (i = 0; i < fcinfo->nargs; i++)
+ {
+ HeapTuple argTypeTup;
+ Form_pg_type argTypeStruct;
- argTypeTup = SearchSysCache(TYPEOID,
+ argTypeTup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(procStruct->proargtypes[i]),
- 0, 0, 0);
- if (!HeapTupleIsValid(argTypeTup))
- elog(ERROR, "cache lookup failed for type %u",
- procStruct->proargtypes[i]);
- argTypeStruct = (Form_pg_type) GETSTRUCT(argTypeTup);
-
- if (argTypeStruct->typtype != 'c')
- PLy_input_datum_func(&(proc->args[i]),
- procStruct->proargtypes[i],
- argTypeTup);
- else
- proc->args[i].is_rowtype = 2; /* still need to set I/O funcs */
+ 0, 0, 0);
+ if (!HeapTupleIsValid(argTypeTup))
+ elog(ERROR, "cache lookup failed for type %u",
+ procStruct->proargtypes[i]);
+ argTypeStruct = (Form_pg_type) GETSTRUCT(argTypeTup);
+
+ if (argTypeStruct->typtype != 'c')
+ PLy_input_datum_func(&(proc->args[i]),
+ procStruct->proargtypes[i],
+ argTypeTup);
+ else
+ proc->args[i].is_rowtype = 2; /* still need to set I/O
+ * funcs */
- ReleaseSysCache(argTypeTup);
- }
+ ReleaseSysCache(argTypeTup);
+ }
- /*
- * get the text of the function.
- */
- prosrcdatum = SysCacheGetAttr(PROCOID, procTup,
- Anum_pg_proc_prosrc, &isnull);
- if (isnull)
- elog(ERROR, "null prosrc");
- procSource = DatumGetCString(DirectFunctionCall1(textout,
- prosrcdatum));
+ /*
+ * get the text of the function.
+ */
+ prosrcdatum = SysCacheGetAttr(PROCOID, procTup,
+ Anum_pg_proc_prosrc, &isnull);
+ if (isnull)
+ elog(ERROR, "null prosrc");
+ procSource = DatumGetCString(DirectFunctionCall1(textout,
+ prosrcdatum));
- PLy_procedure_compile(proc, procSource);
+ PLy_procedure_compile(proc, procSource);
- pfree(procSource);
+ pfree(procSource);
- proc->me = PyCObject_FromVoidPtr(proc, NULL);
- PyDict_SetItemString(PLy_procedure_cache, key, proc->me);
+ proc->me = PyCObject_FromVoidPtr(proc, NULL);
+ PyDict_SetItemString(PLy_procedure_cache, key, proc->me);
}
PG_CATCH();
{
PG_TRY();
{
- for (i = 0; i < info->in.r.natts; i++)
- {
- char *key,
- *vsrc;
- Datum vattr,
- vdat;
- bool is_null;
- PyObject *value;
+ for (i = 0; i < info->in.r.natts; i++)
+ {
+ char *key,
+ *vsrc;
+ Datum vattr,
+ vdat;
+ bool is_null;
+ PyObject *value;
- if (desc->attrs[i]->attisdropped)
- continue;
+ if (desc->attrs[i]->attisdropped)
+ continue;
- key = NameStr(desc->attrs[i]->attname);
- vattr = heap_getattr(tuple, (i + 1), desc, &is_null);
+ key = NameStr(desc->attrs[i]->attname);
+ vattr = heap_getattr(tuple, (i + 1), desc, &is_null);
- if ((is_null) || (info->in.r.atts[i].func == NULL))
- PyDict_SetItemString(dict, key, Py_None);
- else
- {
- vdat = FunctionCall3(&info->in.r.atts[i].typfunc,
- vattr,
- ObjectIdGetDatum(info->in.r.atts[i].typioparam),
+ if ((is_null) || (info->in.r.atts[i].func == NULL))
+ PyDict_SetItemString(dict, key, Py_None);
+ else
+ {
+ vdat = FunctionCall3(&info->in.r.atts[i].typfunc,
+ vattr,
+ ObjectIdGetDatum(info->in.r.atts[i].typioparam),
Int32GetDatum(desc->attrs[i]->atttypmod));
- vsrc = DatumGetCString(vdat);
-
- /*
- * no exceptions allowed
- */
- value = info->in.r.atts[i].func(vsrc);
- pfree(vsrc);
- PyDict_SetItemString(dict, key, value);
- Py_DECREF(value);
+ vsrc = DatumGetCString(vdat);
+
+ /*
+ * no exceptions allowed
+ */
+ value = info->in.r.atts[i].func(vsrc);
+ pfree(vsrc);
+ PyDict_SetItemString(dict, key, value);
+ Py_DECREF(value);
+ }
}
}
- }
PG_CATCH();
{
Py_DECREF(dict);
oldcontext = CurrentMemoryContext;
PG_TRY();
{
- if (list != NULL)
- {
- int nargs,
- i;
-
- nargs = PySequence_Length(list);
- if (nargs > 0)
+ if (list != NULL)
{
- plan->nargs = nargs;
- plan->types = PLy_malloc(sizeof(Oid) * nargs);
- plan->values = PLy_malloc(sizeof(Datum) * nargs);
- plan->args = PLy_malloc(sizeof(PLyTypeInfo) * nargs);
+ int nargs,
+ i;
- /*
- * the other loop might throw an exception, if PLyTypeInfo
- * member isn't properly initialized the Py_DECREF(plan) will
- * go boom
- */
- for (i = 0; i < nargs; i++)
+ nargs = PySequence_Length(list);
+ if (nargs > 0)
{
- PLy_typeinfo_init(&plan->args[i]);
- plan->values[i] = (Datum) NULL;
- }
+ plan->nargs = nargs;
+ plan->types = PLy_malloc(sizeof(Oid) * nargs);
+ plan->values = PLy_malloc(sizeof(Datum) * nargs);
+ plan->args = PLy_malloc(sizeof(PLyTypeInfo) * nargs);
+
+ /*
+ * the other loop might throw an exception, if PLyTypeInfo
+ * member isn't properly initialized the Py_DECREF(plan)
+ * will go boom
+ */
+ for (i = 0; i < nargs; i++)
+ {
+ PLy_typeinfo_init(&plan->args[i]);
+ plan->values[i] = (Datum) NULL;
+ }
- for (i = 0; i < nargs; i++)
- {
- char *sptr;
- HeapTuple typeTup;
- Form_pg_type typeStruct;
-
- optr = PySequence_GetItem(list, i);
- if (!PyString_Check(optr))
- elog(ERROR, "Type names must be strings.");
- sptr = PyString_AsString(optr);
- /* XXX should extend this to allow qualified type names */
- typeTup = typenameType(makeTypeName(sptr));
- Py_DECREF(optr);
- optr = NULL; /* this is important */
-
- plan->types[i] = HeapTupleGetOid(typeTup);
- typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
- if (typeStruct->typtype != 'c')
- PLy_output_datum_func(&plan->args[i], typeTup);
- else
- elog(ERROR, "tuples not handled in plpy.prepare, yet.");
- ReleaseSysCache(typeTup);
+ for (i = 0; i < nargs; i++)
+ {
+ char *sptr;
+ HeapTuple typeTup;
+ Form_pg_type typeStruct;
+
+ optr = PySequence_GetItem(list, i);
+ if (!PyString_Check(optr))
+ elog(ERROR, "Type names must be strings.");
+ sptr = PyString_AsString(optr);
+
+ /*
+ * XXX should extend this to allow qualified type
+ * names
+ */
+ typeTup = typenameType(makeTypeName(sptr));
+ Py_DECREF(optr);
+ optr = NULL; /* this is important */
+
+ plan->types[i] = HeapTupleGetOid(typeTup);
+ typeStruct = (Form_pg_type) GETSTRUCT(typeTup);
+ if (typeStruct->typtype != 'c')
+ PLy_output_datum_func(&plan->args[i], typeTup);
+ else
+ elog(ERROR, "tuples not handled in plpy.prepare, yet.");
+ ReleaseSysCache(typeTup);
+ }
}
}
- }
- plan->plan = SPI_prepare(query, plan->nargs, plan->types);
- if (plan->plan == NULL)
- elog(ERROR, "SPI_prepare failed: %s",
- SPI_result_code_string(SPI_result));
-
- /* transfer plan from procCxt to topCxt */
- tmpplan = plan->plan;
- plan->plan = SPI_saveplan(tmpplan);
- SPI_freeplan(tmpplan);
- if (plan->plan == NULL)
- elog(ERROR, "SPI_saveplan failed: %s",
- SPI_result_code_string(SPI_result));
+ plan->plan = SPI_prepare(query, plan->nargs, plan->types);
+ if (plan->plan == NULL)
+ elog(ERROR, "SPI_prepare failed: %s",
+ SPI_result_code_string(SPI_result));
+
+ /* transfer plan from procCxt to topCxt */
+ tmpplan = plan->plan;
+ plan->plan = SPI_saveplan(tmpplan);
+ SPI_freeplan(tmpplan);
+ if (plan->plan == NULL)
+ elog(ERROR, "SPI_saveplan failed: %s",
+ SPI_result_code_string(SPI_result));
}
PG_CATCH();
{
oldcontext = CurrentMemoryContext;
PG_TRY();
{
- nulls = palloc(nargs * sizeof(char));
-
- for (i = 0; i < nargs; i++)
- {
- PyObject *elem,
- *so;
- char *sv;
+ nulls = palloc(nargs * sizeof(char));
- elem = PySequence_GetItem(list, i);
- if (elem != Py_None)
+ for (i = 0; i < nargs; i++)
{
- so = PyObject_Str(elem);
- sv = PyString_AsString(so);
-
- /*
- * FIXME -- if this elogs, we have Python reference leak
- */
- plan->values[i] =
- FunctionCall3(&(plan->args[i].out.d.typfunc),
- CStringGetDatum(sv),
- ObjectIdGetDatum(plan->args[i].out.d.typioparam),
- Int32GetDatum(-1));
+ PyObject *elem,
+ *so;
+ char *sv;
- Py_DECREF(so);
- Py_DECREF(elem);
-
- nulls[i] = ' ';
- }
- else
- {
- Py_DECREF(elem);
- plan->values[i] = (Datum) 0;
- nulls[i] = 'n';
+ elem = PySequence_GetItem(list, i);
+ if (elem != Py_None)
+ {
+ so = PyObject_Str(elem);
+ sv = PyString_AsString(so);
+
+ /*
+					 * FIXME -- if this elogs, we have a Python reference leak
+ */
+ plan->values[i] =
+ FunctionCall3(&(plan->args[i].out.d.typfunc),
+ CStringGetDatum(sv),
+ ObjectIdGetDatum(plan->args[i].out.d.typioparam),
+ Int32GetDatum(-1));
+
+ Py_DECREF(so);
+ Py_DECREF(elem);
+
+ nulls[i] = ' ';
+ }
+ else
+ {
+ Py_DECREF(elem);
+ plan->values[i] = (Datum) 0;
+ nulls[i] = 'n';
+ }
}
- }
- rv = SPI_execp(plan->plan, plan->values, nulls, limit);
+ rv = SPI_execp(plan->plan, plan->values, nulls, limit);
- pfree(nulls);
+ pfree(nulls);
}
PG_CATCH();
{
MemoryContextSwitchTo(oldcontext);
PLy_error_in_progress = CopyErrorData();
FlushErrorState();
+
/*
* cleanup plan->values array
*/
oldcontext = CurrentMemoryContext;
PG_TRY();
- {
rv = SPI_exec(query, limit);
- }
PG_CATCH();
{
MemoryContextSwitchTo(oldcontext);
for (i = 0; i < rows; i++)
{
PyObject *row = PLyDict_FromTuple(&args, tuptable->vals[i],
- tuptable->tupdesc);
+ tuptable->tupdesc);
PyList_SetItem(result->rows, i, row);
}
FlushErrorState();
if (!PyErr_Occurred())
PyErr_SetString(PLy_exc_error,
- "Unknown error in PLy_spi_execute_fetch_result");
+ "Unknown error in PLy_spi_execute_fetch_result");
Py_DECREF(result);
PLy_typeinfo_dealloc(&args);
return NULL;
oldcontext = CurrentMemoryContext;
PG_TRY();
- {
elog(level, "%s", sv);
- }
PG_CATCH();
{
MemoryContextSwitchTo(oldcontext);
PLy_error_in_progress = CopyErrorData();
FlushErrorState();
Py_XDECREF(so);
+
/*
* returning NULL here causes the python interpreter to bail. when
* control passes back to PLy_procedure_call, we check for PG
* ENHANCEMENTS, OR MODIFICATIONS.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.89 2004/08/04 21:34:32 tgl Exp $
+ * $PostgreSQL: pgsql/src/pl/tcl/pltcl.c,v 1.90 2004/08/29 05:07:02 momjian Exp $
*
**********************************************************************/
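
Both the plpython.c hunks above and the pltcl.c hunks below lean on the same capture-and-rethrow idiom that this patch merely reindents: catch the backend error, save it with CopyErrorData(), clear it with FlushErrorState(), let the interpreter unwind, then ReThrowError() once control is back in C. A condensed sketch, assuming the usual backend headers; do_pg_work(), call_guarded() and rethrow_if_needed() are hypothetical names:

#include "postgres.h"
#include "utils/memutils.h"            /* MemoryContextSwitchTo() */

static ErrorData *error_in_progress = NULL;	/* cf. PLy_/pltcl_error_in_progress */

extern void do_pg_work(void);          /* hypothetical helper that may elog(ERROR) */

static void
call_guarded(void)
{
	MemoryContext oldcontext = CurrentMemoryContext;

	PG_TRY();
	{
		do_pg_work();
	}
	PG_CATCH();
	{
		/* remember the error and reset PG's error state for now */
		MemoryContextSwitchTo(oldcontext);
		error_in_progress = CopyErrorData();
		FlushErrorState();
	}
	PG_END_TRY();
}

static void
rethrow_if_needed(void)
{
	/* propagate the saved error no matter what the interpreter reported */
	if (error_in_progress)
	{
		ErrorData  *edata = error_in_progress;

		error_in_progress = NULL;
		ReThrowError(edata);
	}
}
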
* Forward declarations
**********************************************************************/
static void pltcl_init_all(void);
-static void pltcl_init_interp(Tcl_Interp *interp);
+static void pltcl_init_interp(Tcl_Interp * interp);
-static void pltcl_init_load_unknown(Tcl_Interp *interp);
+static void pltcl_init_load_unknown(Tcl_Interp * interp);
Datum pltcl_call_handler(PG_FUNCTION_ARGS);
Datum pltclu_call_handler(PG_FUNCTION_ARGS);
static pltcl_proc_desc *compile_pltcl_function(Oid fn_oid, Oid tgreloid);
-static int pltcl_elog(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_elog(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_quote(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_quote(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_exec(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_SPI_exec(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_execp(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_SPI_execp(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
+static int pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[]);
-static void pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
+static void pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc);
static void pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString *retval);
+ Tcl_DString * retval);
/*
* pltcl_init_interp() - initialize a Tcl interpreter
**********************************************************************/
static void
-pltcl_init_interp(Tcl_Interp *interp)
+pltcl_init_interp(Tcl_Interp * interp)
{
/************************************************************
* Install the commands for SPI support in the interpreter
* table pltcl_modules (if it exists)
**********************************************************************/
static void
-pltcl_init_load_unknown(Tcl_Interp *interp)
+pltcl_init_load_unknown(Tcl_Interp * interp)
{
int spi_rc;
int tcl_rc;
************************************************************/
PG_TRY();
{
- for (i = 0; i < prodesc->nargs; i++)
- {
- if (prodesc->arg_is_rowtype[i])
+ for (i = 0; i < prodesc->nargs; i++)
{
- /**************************************************
- * For tuple values, add a list for 'array set ...'
- **************************************************/
- if (fcinfo->argnull[i])
- Tcl_DStringAppendElement(&tcl_cmd, "");
- else
+ if (prodesc->arg_is_rowtype[i])
{
- HeapTupleHeader td;
- Oid tupType;
- int32 tupTypmod;
- TupleDesc tupdesc;
- HeapTupleData tmptup;
-
- td = DatumGetHeapTupleHeader(fcinfo->arg[i]);
- /* Extract rowtype info and find a tupdesc */
- tupType = HeapTupleHeaderGetTypeId(td);
- tupTypmod = HeapTupleHeaderGetTypMod(td);
- tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
- /* Build a temporary HeapTuple control structure */
- tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
- tmptup.t_data = td;
-
- Tcl_DStringSetLength(&list_tmp, 0);
- pltcl_build_tuple_argument(&tmptup, tupdesc, &list_tmp);
- Tcl_DStringAppendElement(&tcl_cmd,
- Tcl_DStringValue(&list_tmp));
+ /**************************************************
+ * For tuple values, add a list for 'array set ...'
+ **************************************************/
+ if (fcinfo->argnull[i])
+ Tcl_DStringAppendElement(&tcl_cmd, "");
+ else
+ {
+ HeapTupleHeader td;
+ Oid tupType;
+ int32 tupTypmod;
+ TupleDesc tupdesc;
+ HeapTupleData tmptup;
+
+ td = DatumGetHeapTupleHeader(fcinfo->arg[i]);
+ /* Extract rowtype info and find a tupdesc */
+ tupType = HeapTupleHeaderGetTypeId(td);
+ tupTypmod = HeapTupleHeaderGetTypMod(td);
+ tupdesc = lookup_rowtype_tupdesc(tupType, tupTypmod);
+ /* Build a temporary HeapTuple control structure */
+ tmptup.t_len = HeapTupleHeaderGetDatumLength(td);
+ tmptup.t_data = td;
+
+ Tcl_DStringSetLength(&list_tmp, 0);
+ pltcl_build_tuple_argument(&tmptup, tupdesc, &list_tmp);
+ Tcl_DStringAppendElement(&tcl_cmd,
+ Tcl_DStringValue(&list_tmp));
+ }
}
- }
- else
- {
- /**************************************************
- * Single values are added as string element
- * of their external representation
- **************************************************/
- if (fcinfo->argnull[i])
- Tcl_DStringAppendElement(&tcl_cmd, "");
else
{
- char *tmp;
-
- tmp = DatumGetCString(FunctionCall3(&prodesc->arg_out_func[i],
- fcinfo->arg[i],
- ObjectIdGetDatum(prodesc->arg_typioparam[i]),
- Int32GetDatum(-1)));
- UTF_BEGIN;
- Tcl_DStringAppendElement(&tcl_cmd, UTF_E2U(tmp));
- UTF_END;
- pfree(tmp);
+ /**************************************************
+ * Single values are added as string element
+ * of their external representation
+ **************************************************/
+ if (fcinfo->argnull[i])
+ Tcl_DStringAppendElement(&tcl_cmd, "");
+ else
+ {
+ char *tmp;
+
+ tmp = DatumGetCString(FunctionCall3(&prodesc->arg_out_func[i],
+ fcinfo->arg[i],
+ ObjectIdGetDatum(prodesc->arg_typioparam[i]),
+ Int32GetDatum(-1)));
+ UTF_BEGIN;
+ Tcl_DStringAppendElement(&tcl_cmd, UTF_E2U(tmp));
+ UTF_END;
+ pfree(tmp);
+ }
}
}
}
- }
PG_CATCH();
{
Tcl_DStringFree(&tcl_cmd);
************************************************************/
if (pltcl_error_in_progress)
{
- ErrorData *edata = pltcl_error_in_progress;
+ ErrorData *edata = pltcl_error_in_progress;
pltcl_error_in_progress = NULL;
ReThrowError(edata);
UTF_BEGIN;
retval = FunctionCall3(&prodesc->result_in_func,
PointerGetDatum(UTF_U2E(interp->result)),
- ObjectIdGetDatum(prodesc->result_typioparam),
+ ObjectIdGetDatum(prodesc->result_typioparam),
Int32GetDatum(-1));
UTF_END;
}
/* Find or compile the function */
prodesc = compile_pltcl_function(fcinfo->flinfo->fn_oid,
- RelationGetRelid(trigdata->tg_relation));
+ RelationGetRelid(trigdata->tg_relation));
if (prodesc->lanpltrusted)
interp = pltcl_safe_interp;
Tcl_DStringInit(&tcl_newtup);
PG_TRY();
{
- /* The procedure name */
- Tcl_DStringAppendElement(&tcl_cmd, prodesc->proname);
+ /* The procedure name */
+ Tcl_DStringAppendElement(&tcl_cmd, prodesc->proname);
- /* The trigger name for argument TG_name */
- Tcl_DStringAppendElement(&tcl_cmd, trigdata->tg_trigger->tgname);
+ /* The trigger name for argument TG_name */
+ Tcl_DStringAppendElement(&tcl_cmd, trigdata->tg_trigger->tgname);
- /* The oid of the trigger relation for argument TG_relid */
- stroid = DatumGetCString(DirectFunctionCall1(oidout,
+ /* The oid of the trigger relation for argument TG_relid */
+ stroid = DatumGetCString(DirectFunctionCall1(oidout,
ObjectIdGetDatum(trigdata->tg_relation->rd_id)));
- Tcl_DStringAppendElement(&tcl_cmd, stroid);
- pfree(stroid);
+ Tcl_DStringAppendElement(&tcl_cmd, stroid);
+ pfree(stroid);
- /* A list of attribute names for argument TG_relatts */
- Tcl_DStringAppendElement(&tcl_trigtup, "");
- for (i = 0; i < tupdesc->natts; i++)
- {
- if (tupdesc->attrs[i]->attisdropped)
- Tcl_DStringAppendElement(&tcl_trigtup, "");
+ /* A list of attribute names for argument TG_relatts */
+ Tcl_DStringAppendElement(&tcl_trigtup, "");
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ if (tupdesc->attrs[i]->attisdropped)
+ Tcl_DStringAppendElement(&tcl_trigtup, "");
+ else
+ Tcl_DStringAppendElement(&tcl_trigtup,
+ NameStr(tupdesc->attrs[i]->attname));
+ }
+ Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
+ Tcl_DStringFree(&tcl_trigtup);
+ Tcl_DStringInit(&tcl_trigtup);
+
+ /* The when part of the event for TG_when */
+ if (TRIGGER_FIRED_BEFORE(trigdata->tg_event))
+ Tcl_DStringAppendElement(&tcl_cmd, "BEFORE");
+ else if (TRIGGER_FIRED_AFTER(trigdata->tg_event))
+ Tcl_DStringAppendElement(&tcl_cmd, "AFTER");
else
- Tcl_DStringAppendElement(&tcl_trigtup,
- NameStr(tupdesc->attrs[i]->attname));
- }
- Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
- Tcl_DStringFree(&tcl_trigtup);
- Tcl_DStringInit(&tcl_trigtup);
+ elog(ERROR, "unrecognized WHEN tg_event: %u", trigdata->tg_event);
- /* The when part of the event for TG_when */
- if (TRIGGER_FIRED_BEFORE(trigdata->tg_event))
- Tcl_DStringAppendElement(&tcl_cmd, "BEFORE");
- else if (TRIGGER_FIRED_AFTER(trigdata->tg_event))
- Tcl_DStringAppendElement(&tcl_cmd, "AFTER");
- else
- elog(ERROR, "unrecognized WHEN tg_event: %u", trigdata->tg_event);
+ /* The level part of the event for TG_level */
+ if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
+ {
+ Tcl_DStringAppendElement(&tcl_cmd, "ROW");
- /* The level part of the event for TG_level */
- if (TRIGGER_FIRED_FOR_ROW(trigdata->tg_event))
- {
- Tcl_DStringAppendElement(&tcl_cmd, "ROW");
+ /* Build the data list for the trigtuple */
+ pltcl_build_tuple_argument(trigdata->tg_trigtuple,
+ tupdesc, &tcl_trigtup);
- /* Build the data list for the trigtuple */
- pltcl_build_tuple_argument(trigdata->tg_trigtuple,
- tupdesc, &tcl_trigtup);
+ /*
+ * Now the command part of the event for TG_op and data for
+ * NEW and OLD
+ */
+ if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
+ {
+ Tcl_DStringAppendElement(&tcl_cmd, "INSERT");
- /*
- * Now the command part of the event for TG_op and data for NEW
- * and OLD
- */
- if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
- {
- Tcl_DStringAppendElement(&tcl_cmd, "INSERT");
+ Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
+ Tcl_DStringAppendElement(&tcl_cmd, "");
- Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
- Tcl_DStringAppendElement(&tcl_cmd, "");
+ rettup = trigdata->tg_trigtuple;
+ }
+ else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
+ {
+ Tcl_DStringAppendElement(&tcl_cmd, "DELETE");
- rettup = trigdata->tg_trigtuple;
- }
- else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
- {
- Tcl_DStringAppendElement(&tcl_cmd, "DELETE");
+ Tcl_DStringAppendElement(&tcl_cmd, "");
+ Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
- Tcl_DStringAppendElement(&tcl_cmd, "");
- Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
+ rettup = trigdata->tg_trigtuple;
+ }
+ else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
+ {
+ Tcl_DStringAppendElement(&tcl_cmd, "UPDATE");
- rettup = trigdata->tg_trigtuple;
+ pltcl_build_tuple_argument(trigdata->tg_newtuple,
+ tupdesc, &tcl_newtup);
+
+ Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_newtup));
+ Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
+
+ rettup = trigdata->tg_newtuple;
+ }
+ else
+ elog(ERROR, "unrecognized OP tg_event: %u", trigdata->tg_event);
}
- else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
+ else if (TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
{
- Tcl_DStringAppendElement(&tcl_cmd, "UPDATE");
-
- pltcl_build_tuple_argument(trigdata->tg_newtuple,
- tupdesc, &tcl_newtup);
+ Tcl_DStringAppendElement(&tcl_cmd, "STATEMENT");
+
+ if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
+ Tcl_DStringAppendElement(&tcl_cmd, "INSERT");
+ else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
+ Tcl_DStringAppendElement(&tcl_cmd, "DELETE");
+ else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
+ Tcl_DStringAppendElement(&tcl_cmd, "UPDATE");
+ else
+ elog(ERROR, "unrecognized OP tg_event: %u", trigdata->tg_event);
- Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_newtup));
- Tcl_DStringAppendElement(&tcl_cmd, Tcl_DStringValue(&tcl_trigtup));
+ Tcl_DStringAppendElement(&tcl_cmd, "");
+ Tcl_DStringAppendElement(&tcl_cmd, "");
- rettup = trigdata->tg_newtuple;
+ rettup = (HeapTuple) NULL;
}
else
- elog(ERROR, "unrecognized OP tg_event: %u", trigdata->tg_event);
- }
- else if (TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
- {
- Tcl_DStringAppendElement(&tcl_cmd, "STATEMENT");
-
- if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
- Tcl_DStringAppendElement(&tcl_cmd, "INSERT");
- else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
- Tcl_DStringAppendElement(&tcl_cmd, "DELETE");
- else if (TRIGGER_FIRED_BY_UPDATE(trigdata->tg_event))
- Tcl_DStringAppendElement(&tcl_cmd, "UPDATE");
- else
- elog(ERROR, "unrecognized OP tg_event: %u", trigdata->tg_event);
+ elog(ERROR, "unrecognized LEVEL tg_event: %u", trigdata->tg_event);
- Tcl_DStringAppendElement(&tcl_cmd, "");
- Tcl_DStringAppendElement(&tcl_cmd, "");
-
- rettup = (HeapTuple) NULL;
- }
- else
- elog(ERROR, "unrecognized LEVEL tg_event: %u", trigdata->tg_event);
-
- /* Finally append the arguments from CREATE TRIGGER */
- for (i = 0; i < trigdata->tg_trigger->tgnargs; i++)
- Tcl_DStringAppendElement(&tcl_cmd, trigdata->tg_trigger->tgargs[i]);
+ /* Finally append the arguments from CREATE TRIGGER */
+ for (i = 0; i < trigdata->tg_trigger->tgnargs; i++)
+ Tcl_DStringAppendElement(&tcl_cmd, trigdata->tg_trigger->tgargs[i]);
}
PG_CATCH();
************************************************************/
if (pltcl_error_in_progress)
{
- ErrorData *edata = pltcl_error_in_progress;
+ ErrorData *edata = pltcl_error_in_progress;
pltcl_error_in_progress = NULL;
ReThrowError(edata);
PG_TRY();
{
- if (ret_numvals % 2 != 0)
- elog(ERROR, "invalid return list from trigger - must have even # of elements");
+ if (ret_numvals % 2 != 0)
+ elog(ERROR, "invalid return list from trigger - must have even # of elements");
- modattrs = (int *) palloc(tupdesc->natts * sizeof(int));
- modvalues = (Datum *) palloc(tupdesc->natts * sizeof(Datum));
- for (i = 0; i < tupdesc->natts; i++)
- {
- modattrs[i] = i + 1;
- modvalues[i] = (Datum) NULL;
- }
+ modattrs = (int *) palloc(tupdesc->natts * sizeof(int));
+ modvalues = (Datum *) palloc(tupdesc->natts * sizeof(Datum));
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ modattrs[i] = i + 1;
+ modvalues[i] = (Datum) NULL;
+ }
- modnulls = palloc(tupdesc->natts);
- memset(modnulls, 'n', tupdesc->natts);
+ modnulls = palloc(tupdesc->natts);
+ memset(modnulls, 'n', tupdesc->natts);
- for (i = 0; i < ret_numvals; i += 2)
- {
- CONST84 char *ret_name = ret_values[i];
- CONST84 char *ret_value = ret_values[i + 1];
- int attnum;
- HeapTuple typeTup;
- Oid typinput;
- Oid typioparam;
- FmgrInfo finfo;
+ for (i = 0; i < ret_numvals; i += 2)
+ {
+ CONST84 char *ret_name = ret_values[i];
+ CONST84 char *ret_value = ret_values[i + 1];
+ int attnum;
+ HeapTuple typeTup;
+ Oid typinput;
+ Oid typioparam;
+ FmgrInfo finfo;
- /************************************************************
- * Ignore ".tupno" pseudo elements (see pltcl_set_tuple_values)
- ************************************************************/
- if (strcmp(ret_name, ".tupno") == 0)
- continue;
+ /************************************************************
+ * Ignore ".tupno" pseudo elements (see pltcl_set_tuple_values)
+ ************************************************************/
+ if (strcmp(ret_name, ".tupno") == 0)
+ continue;
- /************************************************************
- * Get the attribute number
- ************************************************************/
- attnum = SPI_fnumber(tupdesc, ret_name);
- if (attnum == SPI_ERROR_NOATTRIBUTE)
- elog(ERROR, "invalid attribute \"%s\"", ret_name);
- if (attnum <= 0)
- elog(ERROR, "cannot set system attribute \"%s\"", ret_name);
+ /************************************************************
+ * Get the attribute number
+ ************************************************************/
+ attnum = SPI_fnumber(tupdesc, ret_name);
+ if (attnum == SPI_ERROR_NOATTRIBUTE)
+ elog(ERROR, "invalid attribute \"%s\"", ret_name);
+ if (attnum <= 0)
+ elog(ERROR, "cannot set system attribute \"%s\"", ret_name);
- /************************************************************
- * Ignore dropped columns
- ************************************************************/
- if (tupdesc->attrs[attnum - 1]->attisdropped)
- continue;
+ /************************************************************
+ * Ignore dropped columns
+ ************************************************************/
+ if (tupdesc->attrs[attnum - 1]->attisdropped)
+ continue;
- /************************************************************
- * Lookup the attribute type in the syscache
- * for the input function
- ************************************************************/
- typeTup = SearchSysCache(TYPEOID,
+ /************************************************************
+ * Lookup the attribute type in the syscache
+ * for the input function
+ ************************************************************/
+ typeTup = SearchSysCache(TYPEOID,
ObjectIdGetDatum(tupdesc->attrs[attnum - 1]->atttypid),
- 0, 0, 0);
- if (!HeapTupleIsValid(typeTup))
- elog(ERROR, "cache lookup failed for type %u",
- tupdesc->attrs[attnum - 1]->atttypid);
- typinput = ((Form_pg_type) GETSTRUCT(typeTup))->typinput;
- typioparam = getTypeIOParam(typeTup);
- ReleaseSysCache(typeTup);
+ 0, 0, 0);
+ if (!HeapTupleIsValid(typeTup))
+ elog(ERROR, "cache lookup failed for type %u",
+ tupdesc->attrs[attnum - 1]->atttypid);
+ typinput = ((Form_pg_type) GETSTRUCT(typeTup))->typinput;
+ typioparam = getTypeIOParam(typeTup);
+ ReleaseSysCache(typeTup);
- /************************************************************
- * Set the attribute to NOT NULL and convert the contents
- ************************************************************/
- modnulls[attnum - 1] = ' ';
- fmgr_info(typinput, &finfo);
- UTF_BEGIN;
- modvalues[attnum - 1] =
- FunctionCall3(&finfo,
- CStringGetDatum(UTF_U2E(ret_value)),
- ObjectIdGetDatum(typioparam),
+ /************************************************************
+ * Set the attribute to NOT NULL and convert the contents
+ ************************************************************/
+ modnulls[attnum - 1] = ' ';
+ fmgr_info(typinput, &finfo);
+ UTF_BEGIN;
+ modvalues[attnum - 1] =
+ FunctionCall3(&finfo,
+ CStringGetDatum(UTF_U2E(ret_value)),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(tupdesc->attrs[attnum - 1]->atttypmod));
- UTF_END;
- }
+ UTF_END;
+ }
- rettup = SPI_modifytuple(trigdata->tg_relation, rettup, tupdesc->natts,
- modattrs, modvalues, modnulls);
+ rettup = SPI_modifytuple(trigdata->tg_relation, rettup, tupdesc->natts,
+ modattrs, modvalues, modnulls);
- pfree(modattrs);
- pfree(modvalues);
- pfree(modnulls);
+ pfree(modattrs);
+ pfree(modvalues);
+ pfree(modnulls);
- if (rettup == NULL)
- elog(ERROR, "SPI_modifytuple() failed - RC = %d", SPI_result);
+ if (rettup == NULL)
+ elog(ERROR, "SPI_modifytuple() failed - RC = %d", SPI_result);
}
PG_CATCH();
* pltcl_elog() - elog() support for PLTcl
**********************************************************************/
static int
-pltcl_elog(ClientData cdata, Tcl_Interp *interp,
+pltcl_elog(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
volatile int level;
* be used in SPI_exec query strings
**********************************************************************/
static int
-pltcl_quote(ClientData cdata, Tcl_Interp *interp,
+pltcl_quote(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
char *tmp;
* pltcl_argisnull() - determine if a specific argument is NULL
**********************************************************************/
static int
-pltcl_argisnull(ClientData cdata, Tcl_Interp *interp,
+pltcl_argisnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int argno;
* pltcl_returnnull() - Cause a NULL return from a function
**********************************************************************/
static int
-pltcl_returnnull(ClientData cdata, Tcl_Interp *interp,
+pltcl_returnnull(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
FunctionCallInfo fcinfo = pltcl_current_fcinfo;
* for the Tcl interpreter
**********************************************************************/
static int
-pltcl_SPI_exec(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_exec(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
volatile int my_rc;
* and not save the plan currently.
**********************************************************************/
static int
-pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_prepare(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
int nargs;
oldcontext = CurrentMemoryContext;
PG_TRY();
{
- /************************************************************
- * Lookup the argument types by name in the system cache
- * and remember the required information for input conversion
- ************************************************************/
- for (i = 0; i < nargs; i++)
- {
- char *argcopy;
- List *names = NIL;
- ListCell *l;
- TypeName *typename;
-
/************************************************************
- * Use SplitIdentifierString() on a copy of the type name,
- * turn the resulting pointer list into a TypeName node
- * and call typenameType() to get the pg_type tuple.
+ * Lookup the argument types by name in the system cache
+ * and remember the required information for input conversion
************************************************************/
- argcopy = pstrdup(args[i]);
- SplitIdentifierString(argcopy, '.', &names);
- typename = makeNode(TypeName);
- foreach (l, names)
- typename->names = lappend(typename->names, makeString(lfirst(l)));
-
- typeTup = typenameType(typename);
- qdesc->argtypes[i] = HeapTupleGetOid(typeTup);
- perm_fmgr_info(((Form_pg_type) GETSTRUCT(typeTup))->typinput,
- &(qdesc->arginfuncs[i]));
- qdesc->argtypioparams[i] = getTypeIOParam(typeTup);
- ReleaseSysCache(typeTup);
+ for (i = 0; i < nargs; i++)
+ {
+ char *argcopy;
+ List *names = NIL;
+ ListCell *l;
+ TypeName *typename;
- list_free(typename->names);
- pfree(typename);
- list_free(names);
- pfree(argcopy);
- }
+ /************************************************************
+ * Use SplitIdentifierString() on a copy of the type name,
+ * turn the resulting pointer list into a TypeName node
+ * and call typenameType() to get the pg_type tuple.
+ ************************************************************/
+ argcopy = pstrdup(args[i]);
+ SplitIdentifierString(argcopy, '.', &names);
+ typename = makeNode(TypeName);
+ foreach(l, names)
+ typename->names = lappend(typename->names, makeString(lfirst(l)));
+
+ typeTup = typenameType(typename);
+ qdesc->argtypes[i] = HeapTupleGetOid(typeTup);
+ perm_fmgr_info(((Form_pg_type) GETSTRUCT(typeTup))->typinput,
+ &(qdesc->arginfuncs[i]));
+ qdesc->argtypioparams[i] = getTypeIOParam(typeTup);
+ ReleaseSysCache(typeTup);
- /************************************************************
- * Prepare the plan and check for errors
- ************************************************************/
- UTF_BEGIN;
- plan = SPI_prepare(UTF_U2E(argv[1]), nargs, qdesc->argtypes);
- UTF_END;
+ list_free(typename->names);
+ pfree(typename);
+ list_free(names);
+ pfree(argcopy);
+ }
- if (plan == NULL)
- elog(ERROR, "SPI_prepare() failed");
+ /************************************************************
+ * Prepare the plan and check for errors
+ ************************************************************/
+ UTF_BEGIN;
+ plan = SPI_prepare(UTF_U2E(argv[1]), nargs, qdesc->argtypes);
+ UTF_END;
- /************************************************************
- * Save the plan into permanent memory (right now it's in the
- * SPI procCxt, which will go away at function end).
- ************************************************************/
- qdesc->plan = SPI_saveplan(plan);
- if (qdesc->plan == NULL)
- elog(ERROR, "SPI_saveplan() failed");
+ if (plan == NULL)
+ elog(ERROR, "SPI_prepare() failed");
- /* Release the procCxt copy to avoid within-function memory leak */
- SPI_freeplan(plan);
+ /************************************************************
+ * Save the plan into permanent memory (right now it's in the
+ * SPI procCxt, which will go away at function end).
+ ************************************************************/
+ qdesc->plan = SPI_saveplan(plan);
+ if (qdesc->plan == NULL)
+ elog(ERROR, "SPI_saveplan() failed");
- /************************************************************
- * Insert a hashtable entry for the plan and return
- * the key to the caller
- ************************************************************/
- if (interp == pltcl_norm_interp)
- query_hash = pltcl_norm_query_hash;
- else
- query_hash = pltcl_safe_query_hash;
+ /* Release the procCxt copy to avoid within-function memory leak */
+ SPI_freeplan(plan);
+
+ /************************************************************
+ * Insert a hashtable entry for the plan and return
+ * the key to the caller
+ ************************************************************/
+ if (interp == pltcl_norm_interp)
+ query_hash = pltcl_norm_query_hash;
+ else
+ query_hash = pltcl_safe_query_hash;
}
PG_CATCH();
* pltcl_SPI_execp() - Execute a prepared plan
**********************************************************************/
static int
-pltcl_SPI_execp(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_execp(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
volatile int my_rc;
UTF_BEGIN;
argvalues[j] =
FunctionCall3(&qdesc->arginfuncs[j],
- CStringGetDatum(UTF_U2E(callargs[j])),
- ObjectIdGetDatum(qdesc->argtypioparams[j]),
+ CStringGetDatum(UTF_U2E(callargs[j])),
+ ObjectIdGetDatum(qdesc->argtypioparams[j]),
Int32GetDatum(-1));
UTF_END;
}
************************************************************/
oldcontext = CurrentMemoryContext;
PG_TRY();
- {
spi_rc = SPI_execp(qdesc->plan, argvalues, nulls, count);
- }
PG_CATCH();
{
MemoryContextSwitchTo(oldcontext);
* be used after insert queries
**********************************************************************/
static int
-pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp *interp,
+pltcl_SPI_lastoid(ClientData cdata, Tcl_Interp * interp,
int argc, CONST84 char *argv[])
{
char buf[64];
* of a given tuple
**********************************************************************/
static void
-pltcl_set_tuple_values(Tcl_Interp *interp, CONST84 char *arrayname,
+pltcl_set_tuple_values(Tcl_Interp * interp, CONST84 char *arrayname,
int tupno, HeapTuple tuple, TupleDesc tupdesc)
{
int i;
{
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
UTF_BEGIN;
Tcl_SetVar2(interp, *arrptr, *nameptr, UTF_E2U(outputstr), 0);
**********************************************************************/
static void
pltcl_build_tuple_argument(HeapTuple tuple, TupleDesc tupdesc,
- Tcl_DString *retval)
+ Tcl_DString * retval)
{
int i;
char *outputstr;
{
outputstr = DatumGetCString(OidFunctionCall3(typoutput,
attr,
- ObjectIdGetDatum(typioparam),
+ ObjectIdGetDatum(typioparam),
Int32GetDatum(tupdesc->attrs[i]->atttypmod)));
Tcl_DStringAppendElement(retval, attname);
UTF_BEGIN;
* Win32 (NT, Win2k, XP). replace() doesn't work on Win95/98/Me.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.21 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/dirmod.c,v 1.22 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
while (!MoveFileEx(from, to, MOVEFILE_REPLACE_EXISTING))
#endif
#ifdef __CYGWIN__
- while (rename(from, to) < 0)
+ while (rename(from, to) < 0)
#endif
- {
+ {
#ifdef WIN32
- if (GetLastError() != ERROR_ACCESS_DENIED)
+ if (GetLastError() != ERROR_ACCESS_DENIED)
#endif
#ifdef __CYGWIN__
- if (errno != EACCES)
+ if (errno != EACCES)
#endif
- /* set errno? */
- return -1;
- pg_usleep(100000); /* us */
- if (loops == 30)
+ /* set errno? */
+ return -1;
+ pg_usleep(100000); /* us */
+ if (loops == 30)
#ifndef FRONTEND
- elog(LOG, "could not rename \"%s\" to \"%s\", continuing to try",
- from, to);
+ elog(LOG, "could not rename \"%s\" to \"%s\", continuing to try",
+ from, to);
#else
- fprintf(stderr, "could not rename \"%s\" to \"%s\", continuing to try\n",
- from, to);
+ fprintf(stderr, "could not rename \"%s\" to \"%s\", continuing to try\n",
+ from, to);
#endif
- loops++;
- }
+ loops++;
+ }
if (loops > 30)
#ifndef FRONTEND
*/
typedef struct
{
- DWORD ReparseTag;
- WORD ReparseDataLength;
- WORD Reserved;
- /* SymbolicLinkReparseBuffer */
- WORD SubstituteNameOffset;
- WORD SubstituteNameLength;
- WORD PrintNameOffset;
- WORD PrintNameLength;
- WCHAR PathBuffer[1];
+ DWORD ReparseTag;
+ WORD ReparseDataLength;
+ WORD Reserved;
+ /* SymbolicLinkReparseBuffer */
+ WORD SubstituteNameOffset;
+ WORD SubstituteNameLength;
+ WORD PrintNameOffset;
+ WORD PrintNameLength;
+ WCHAR PathBuffer[1];
}
-REPARSE_JUNCTION_DATA_BUFFER;
+
+ REPARSE_JUNCTION_DATA_BUFFER;
#define REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE \
FIELD_OFFSET(REPARSE_JUNCTION_DATA_BUFFER, SubstituteNameOffset)
int
pgsymlink(const char *oldpath, const char *newpath)
{
- HANDLE dirhandle;
- DWORD len;
- char buffer[MAX_PATH*sizeof(WCHAR) + sizeof(REPARSE_JUNCTION_DATA_BUFFER)];
- char nativeTarget[MAX_PATH];
- char *p = nativeTarget;
- REPARSE_JUNCTION_DATA_BUFFER *reparseBuf = (REPARSE_JUNCTION_DATA_BUFFER*)buffer;
-
+ HANDLE dirhandle;
+ DWORD len;
+ char buffer[MAX_PATH * sizeof(WCHAR) + sizeof(REPARSE_JUNCTION_DATA_BUFFER)];
+ char nativeTarget[MAX_PATH];
+ char *p = nativeTarget;
+ REPARSE_JUNCTION_DATA_BUFFER *reparseBuf = (REPARSE_JUNCTION_DATA_BUFFER *) buffer;
+
CreateDirectory(newpath, 0);
- dirhandle = CreateFile(newpath, GENERIC_READ | GENERIC_WRITE,
- 0, 0, OPEN_EXISTING,
- FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, 0);
-
+ dirhandle = CreateFile(newpath, GENERIC_READ | GENERIC_WRITE,
+ 0, 0, OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, 0);
+
if (dirhandle == INVALID_HANDLE_VALUE)
return -1;
-
+
/* make sure we have an unparsed native win32 path */
if (memcmp("\\??\\", oldpath, 4))
sprintf(nativeTarget, "\\??\\%s", oldpath);
else
strcpy(nativeTarget, oldpath);
-
+
while ((p = strchr(p, '/')) != 0)
*p++ = '\\';
reparseBuf->Reserved = 0;
reparseBuf->SubstituteNameOffset = 0;
reparseBuf->SubstituteNameLength = len;
- reparseBuf->PrintNameOffset = len+sizeof(WCHAR);
+ reparseBuf->PrintNameOffset = len + sizeof(WCHAR);
reparseBuf->PrintNameLength = 0;
MultiByteToWideChar(CP_ACP, 0, nativeTarget, -1,
reparseBuf->PathBuffer, MAX_PATH);
-
+
/*
- * FSCTL_SET_REPARSE_POINT is coded differently depending on SDK version;
- * we use our own definition
+ * FSCTL_SET_REPARSE_POINT is coded differently depending on SDK
+ * version; we use our own definition
*/
- if (!DeviceIoControl(dirhandle,
- CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 41, METHOD_BUFFERED, FILE_ANY_ACCESS),
- reparseBuf,
- reparseBuf->ReparseDataLength + REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE,
- 0, 0, &len, 0))
+ if (!DeviceIoControl(dirhandle,
+ CTL_CODE(FILE_DEVICE_FILE_SYSTEM, 41, METHOD_BUFFERED, FILE_ANY_ACCESS),
+ reparseBuf,
+ reparseBuf->ReparseDataLength + REPARSE_JUNCTION_DATA_BUFFER_HEADER_SIZE,
+ 0, 0, &len, 0))
{
- LPSTR msg;
+ LPSTR msg;
- errno=0;
+ errno = 0;
FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
- NULL, GetLastError(),
+ NULL, GetLastError(),
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
- (LPSTR)&msg, 0, NULL );
+ (LPSTR) & msg, 0, NULL);
#ifndef FRONTEND
ereport(ERROR, (errcode_for_file_access(),
- errmsg("Error setting junction for %s: %s", nativeTarget, msg)));
+ errmsg("Error setting junction for %s: %s", nativeTarget, msg)));
#else
fprintf(stderr, "Error setting junction for %s: %s", nativeTarget, msg);
#endif
LocalFree(msg);
-
+
CloseHandle(dirhandle);
RemoveDirectory(newpath);
return -1;
return 0;
}
-
#endif
* deallocate memory used for filenames
*/
static void
-rmt_cleanup(char ** filenames)
+rmt_cleanup(char **filenames)
{
- char ** fn;
+ char **fn;
for (fn = filenames; *fn; fn++)
#ifdef FRONTEND
if (strcmp(file->d_name, ".") != 0 && strcmp(file->d_name, "..") != 0)
#ifdef FRONTEND
if ((filenames[numnames++] = strdup(file->d_name)) == NULL)
- {
+ {
fprintf(stderr, _("out of memory\n"));
exit(1);
}
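
The dirmod.c hunks above reindent a retry loop around rename()/MoveFileEx() that copes with another process briefly holding the target file open. Reduced to its POSIX side only, the logic looks roughly like this (retry_rename is a hypothetical name; the 100 ms delay and the warning after 30 attempts mirror the hunks above):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>                    /* usleep() */

/*
 * Illustrative only: keep retrying a rename that fails with EACCES,
 * since another process may hold the target open for a moment.
 */
static int
retry_rename(const char *from, const char *to)
{
	int			loops = 0;

	while (rename(from, to) < 0)
	{
		if (errno != EACCES)
			return -1;                  /* a real error: give up */
		usleep(100000);                 /* wait 100 ms and try again */
		if (loops == 30)
			fprintf(stderr, "could not rename \"%s\" to \"%s\", continuing to try\n",
					from, to);
		loops++;
	}
	return 0;
}
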
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/exec.c,v 1.24 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/exec.c,v 1.25 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
struct passwd *pwp;
int i;
int in_grp = 0;
+
#else
char path_exe[MAXPGPATH + 2 + strlen(".exe")];
#endif
}
/* OK, check group bits */
-
- pwp = getpwuid(euid); /* not thread-safe */
+
+ pwp = getpwuid(euid); /* not thread-safe */
if (pwp)
{
if (pwp->pw_gid == buf.st_gid) /* my primary group? */
else if (pwp->pw_name &&
(gp = getgrgid(buf.st_gid)) != NULL && /* not thread-safe */
gp->gr_mem != NULL)
- { /* try list of member groups */
+ { /* try list of member groups */
for (i = 0; gp->gr_mem[i]; ++i)
{
if (!strcmp(gp->gr_mem[i], pwp->pw_name))
is_r = buf.st_mode & S_IROTH;
is_x = buf.st_mode & S_IXOTH;
return is_x ? (is_r ? 0 : -2) : -1;
-
#endif
}
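
The validate_exec() hunks above test the file's owner, group and other permission bits against the effective user. A stripped-down sketch of that test follows; check_exec_perms is a hypothetical name, the effective group is checked with getegid() instead of the getpwuid() lookup, and the supplementary-group walk through getgrgid() that the real code also performs is omitted:

#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

/*
 * Illustrative only: 0 = executable and readable, -1 = not executable,
 * -2 = executable but not readable, mirroring the convention above.
 */
static int
check_exec_perms(const char *path)
{
	struct stat buf;
	uid_t		euid = geteuid();
	int			is_r,
				is_x;

	if (stat(path, &buf) < 0 || !S_ISREG(buf.st_mode))
		return -1;

	if (euid == 0 || buf.st_uid == euid)	/* root, or we own the file */
	{
		is_r = buf.st_mode & S_IRUSR;
		is_x = buf.st_mode & S_IXUSR;
	}
	else if (buf.st_gid == getegid())		/* our effective group owns it */
	{
		is_r = buf.st_mode & S_IRGRP;
		is_x = buf.st_mode & S_IXGRP;
	}
	else									/* fall back to the "other" bits */
	{
		is_r = buf.st_mode & S_IROTH;
		is_x = buf.st_mode & S_IXOTH;
	}
	return is_x ? (is_r ? 0 : -2) : -1;
}
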
* path because we will later change working directory.
*
 * This function is not thread-safe because it calls validate_exec(),
- * which calls getgrgid(). This function should be used only in
+ * which calls getgrgid(). This function should be used only in
* non-threaded binaries, not in library routines.
*/
int
find_my_exec(const char *argv0, char *retpath)
{
- char cwd[MAXPGPATH], test_path[MAXPGPATH];
- char *path;
+ char cwd[MAXPGPATH],
+ test_path[MAXPGPATH];
+ char *path;
if (!getcwd(cwd, MAXPGPATH))
cwd[0] = '\0';
/*
- * First try: use the binary that's located in the
- * same directory if it was invoked with an explicit path.
- * Presumably the user used an explicit path because it
- * wasn't in PATH, and we don't want to use incompatible executables.
+ * First try: use the binary that's located in the same directory if
+ * it was invoked with an explicit path. Presumably the user used an
+ * explicit path because it wasn't in PATH, and we don't want to use
+ * incompatible executables.
*
* For the binary: First try: if we're given some kind of path, use it
* (making sure that a relative path is made absolute before returning
StrNCpy(retpath, argv0, MAXPGPATH);
else
snprintf(retpath, MAXPGPATH, "%s/%s", cwd, argv0);
-
+
canonicalize_path(retpath);
if (validate_exec(retpath) == 0)
{
*/
if ((path = getenv("PATH")) && *path)
{
- char *startp = NULL, *endp = NULL;
+ char *startp = NULL,
+ *endp = NULL;
do
{
endp = first_path_separator(startp);
if (!endp)
- endp = startp + strlen(startp); /* point to end */
+ endp = startp + strlen(startp); /* point to end */
StrNCpy(test_path, startp, Min(endp - startp + 1, MAXPGPATH));
return -1;
#if 0
+
/*
- * Win32 has a native way to find the executable name, but the above
- * method works too.
+ * Win32 has a native way to find the executable name, but the above
+ * method works too.
*/
if (GetModuleFileName(NULL, retpath, MAXPGPATH) == 0)
- log_error("GetModuleFileName failed (%i)",(int)GetLastError());
+ log_error("GetModuleFileName failed (%i)", (int) GetLastError());
#endif
}
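The reflowed comment above spells out find_my_exec()'s strategy: use the binary named by an explicit invocation path if one was given, otherwise walk the PATH environment variable and validate each candidate. As a minimal, self-contained illustration of just the PATH walk, here is a sketch that is not the PostgreSQL code: the ':' separator, the access(X_OK) test, and the find_in_path_sketch() name are simplifying assumptions standing in for first_path_separator() and validate_exec().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
find_in_path_sketch(const char *prog, char *result, size_t resultsize)
{
	const char *path = getenv("PATH");
	const char *start;

	if (path == NULL || *path == '\0')
		return -1;
	for (start = path; *start != '\0';)
	{
		const char *end = strchr(start, ':');
		size_t		len = end ? (size_t) (end - start) : strlen(start);

		/* skip empty entries; require room for dir, '/', prog and NUL */
		if (len > 0 && len + strlen(prog) + 2 <= resultsize)
		{
			memcpy(result, start, len);
			snprintf(result + len, resultsize - len, "/%s", prog);
			if (access(result, X_OK) == 0)
				return 0;		/* plausible executable found */
		}
		start = end ? end + 1 : start + len;
	}
	return -1;
}

int
main(int argc, char **argv)
{
	char		found[1024];

	if (argc > 1 && find_in_path_sketch(argv[1], found, sizeof(found)) == 0)
		printf("%s\n", found);
	return 0;
}

The real function also makes a relative invocation path absolute using the saved current directory and canonicalizes it before validating, as the hunks above show.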
* Executing a command in a pipe and reading the first line from it
* is all we need.
*/
-
-static char *pipe_read_line(char *cmd, char *line, int maxsize)
+
+static char *
+pipe_read_line(char *cmd, char *line, int maxsize)
{
#ifndef WIN32
- FILE *pgver;
+ FILE *pgver;
/* flush output buffers in case popen does not... */
fflush(stdout);
if ((pgver = popen(cmd, "r")) == NULL)
return NULL;
-
+
if (fgets(line, maxsize, pgver) == NULL)
{
perror("fgets failure");
if (pclose_check(pgver))
return NULL;
-
+
return line;
#else
/* Win32 */
SECURITY_ATTRIBUTES sattr;
- HANDLE childstdoutrd, childstdoutwr, childstdoutrddup;
+ HANDLE childstdoutrd,
+ childstdoutwr,
+ childstdoutrddup;
PROCESS_INFORMATION pi;
STARTUPINFO si;
- char *retval = NULL;
+ char *retval = NULL;
sattr.nLength = sizeof(SECURITY_ATTRIBUTES);
sattr.bInheritHandle = TRUE;
if (!CreatePipe(&childstdoutrd, &childstdoutwr, &sattr, 0))
return NULL;
-
+
if (!DuplicateHandle(GetCurrentProcess(),
childstdoutrd,
GetCurrentProcess(),
}
CloseHandle(childstdoutrd);
-
- ZeroMemory(&pi,sizeof(pi));
- ZeroMemory(&si,sizeof(si));
+
+ ZeroMemory(&pi, sizeof(pi));
+ ZeroMemory(&si, sizeof(si));
si.cb = sizeof(si);
si.dwFlags = STARTF_USESTDHANDLES;
si.hStdError = childstdoutwr;
si.hStdOutput = childstdoutwr;
si.hStdInput = INVALID_HANDLE_VALUE;
-
+
if (CreateProcess(NULL,
cmd,
NULL,
&si,
&pi))
{
- DWORD bytesread = 0;
+ DWORD bytesread = 0;
+
/* Successfully started the process */
- ZeroMemory(line,maxsize);
-
+ ZeroMemory(line, maxsize);
+
/* Let's see if we can read */
- if (WaitForSingleObject(childstdoutrddup, 10000) != WAIT_OBJECT_0)
+ if (WaitForSingleObject(childstdoutrddup, 10000) != WAIT_OBJECT_0)
{
/* Got timeout */
CloseHandle(pi.hProcess);
{
/* So we read some data */
retval = line;
- int len = strlen(line);
+ int len = strlen(line);
/*
- * If EOL is \r\n, convert to just \n.
- * Because stdout is a text-mode stream, the \n output by
- * the child process is received as \r\n, so we convert it
- * to \n. The server main.c sets
- * setvbuf(stdout, NULL, _IONBF, 0) which has the effect
- * of disabling \n to \r\n expansion for stdout.
+ * If EOL is \r\n, convert to just \n. Because stdout is a
+ * text-mode stream, the \n output by the child process is
+ * received as \r\n, so we convert it to \n. The server
+ * main.c sets setvbuf(stdout, NULL, _IONBF, 0) which has the
+ * effect of disabling \n to \r\n expansion for stdout.
*/
- if (len >= 2 && line[len-2] == '\r' && line[len-1] == '\n')
+ if (len >= 2 && line[len - 2] == '\r' && line[len - 1] == '\n')
{
- line[len-2] = '\n';
- line[len-1] = '\0';
+ line[len - 2] = '\n';
+ line[len - 1] = '\0';
len--;
}
/*
- * We emulate fgets() behaviour. So if there is no newline
- * at the end, we add one...
+ * We emulate fgets() behaviour. So if there is no newline at
+ * the end, we add one...
*/
- if (len == 0 || line[len-1] != '\n')
- strcat(line,"\n");
+ if (len == 0 || line[len - 1] != '\n')
+ strcat(line, "\n");
}
CloseHandle(pi.hProcess);
CloseHandle(pi.hThread);
}
-
+
CloseHandle(childstdoutwr);
CloseHandle(childstdoutrddup);
return retval;
#endif
}
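Because the Win32 branch above reads the child's output through a pipe handle rather than through fgets(), it has to normalize the line ending and re-add a missing newline by hand, as the reflowed comments describe. A standalone sketch of just that normalization step (hypothetical helper name, not part of exec.c):

#include <string.h>

void
normalize_pipe_line(char *line, size_t maxsize)
{
	size_t		len = strlen(line);

	/* a trailing "\r\n" from the text-mode child becomes plain "\n" */
	if (len >= 2 && line[len - 2] == '\r' && line[len - 1] == '\n')
	{
		line[len - 2] = '\n';
		line[len - 1] = '\0';
		len--;
	}
	/* emulate fgets(): guarantee a terminating newline if room allows */
	if ((len == 0 || line[len - 1] != '\n') && len + 1 < maxsize)
	{
		line[len] = '\n';
		line[len + 1] = '\0';
	}
}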
-
+
/*
{
char cmd[MAXPGPATH];
char line[100];
-
+
if (find_my_exec(argv0, retpath) < 0)
return -1;
- /* Trim off program name and keep just directory */
+ /* Trim off program name and keep just directory */
*last_dir_separator(retpath) = '\0';
canonicalize_path(retpath);
if (validate_exec(retpath))
return -1;
-
+
snprintf(cmd, sizeof(cmd), "\"%s\" -V 2>%s", retpath, DEVNULL);
if (!pipe_read_line(cmd, line, sizeof(line)))
return -1;
-
+
if (strcmp(line, versionstr) != 0)
return -2;
int
pclose_check(FILE *stream)
{
- int exitstatus;
+ int exitstatus;
exitstatus = pclose(stream);
if (exitstatus == 0)
- return 0; /* all is well */
+ return 0; /* all is well */
if (exitstatus == -1)
{
else if (WIFEXITED(exitstatus))
{
log_error(_("child process exited with exit code %d\n"),
- WEXITSTATUS(exitstatus));
+ WEXITSTATUS(exitstatus));
}
else if (WIFSIGNALED(exitstatus))
{
log_error(_("child process was terminated by signal %d\n"),
- WTERMSIG(exitstatus));
+ WTERMSIG(exitstatus));
}
else
{
log_error(_("child process exited with unrecognized status %d\n"),
- exitstatus);
+ exitstatus);
}
return -1;
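pclose_check() above reduces the status word from pclose() to a log message plus a simple success/failure return, using the standard wait macros. A minimal usage sketch of the same decoding outside PostgreSQL; the "exit 3" command string is only an example:

#include <stdio.h>
#include <sys/wait.h>

int
main(void)
{
	FILE	   *fp = popen("exit 3", "r");
	int			status;

	if (fp == NULL)
		return 1;
	status = pclose(fp);		/* same value pclose_check() inspects */
	if (status == -1)
		perror("pclose");
	else if (WIFEXITED(status))
		printf("child exited with code %d\n", WEXITSTATUS(status));
	else if (WIFSIGNALED(status))
		printf("child terminated by signal %d\n", WTERMSIG(status));
	else
		printf("unrecognized status %d\n", status);
	return 0;
}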
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/fseeko.c,v 1.15 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/fseeko.c,v 1.16 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifdef bsdi
flockfile(stream);
#endif
- fflush(stream); /* force writes to fd for stat() */
+ fflush(stream); /* force writes to fd for stat() */
if (fstat(fileno(stream), &filestat) != 0)
goto failure;
floc = filestat.st_size;
* signals that the backend can recognize.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/kill.c,v 1.4 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/kill.c,v 1.5 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
return 0;
}
+
#endif
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/noblock.c,v 1.3 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/noblock.c,v 1.4 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#include
#include
-bool set_noblock(int sock)
+bool
+set_noblock(int sock)
{
#if !defined(WIN32) && !defined(__BEOS__)
return (fcntl(sock, F_SETFL, O_NONBLOCK) != -1);
#else
- long ioctlsocket_ret = 1;
-
+ long ioctlsocket_ret = 1;
+
/* Returns non-0 on failure, while fcntl() returns -1 on failure */
#ifdef WIN32
return (ioctlsocket(sock, FIONBIO, &ioctlsocket_ret) == 0);
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/open.c,v 1.3 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/open.c,v 1.4 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
static int
openFlagsToCreateFileFlags(int openFlags)
{
- switch (openFlags & (O_CREAT|O_TRUNC|O_EXCL))
+ switch (openFlags & (O_CREAT | O_TRUNC | O_EXCL))
{
case 0:
- case O_EXCL: return OPEN_EXISTING;
+ case O_EXCL:
+ return OPEN_EXISTING;
- case O_CREAT: return OPEN_ALWAYS;
+ case O_CREAT:
+ return OPEN_ALWAYS;
case O_TRUNC:
- case O_TRUNC|O_EXCL: return TRUNCATE_EXISTING;
+ case O_TRUNC | O_EXCL:
+ return TRUNCATE_EXISTING;
- case O_CREAT|O_TRUNC: return CREATE_ALWAYS;
+ case O_CREAT | O_TRUNC:
+ return CREATE_ALWAYS;
- case O_CREAT|O_EXCL:
- case O_CREAT|O_TRUNC|O_EXCL: return CREATE_NEW;
+ case O_CREAT | O_EXCL:
+ case O_CREAT | O_TRUNC | O_EXCL:
+ return CREATE_NEW;
}
/* will never get here */
}
/*
- * - file attribute setting, based on fileMode?
- * - handle other flags? (eg FILE_FLAG_NO_BUFFERING/FILE_FLAG_WRITE_THROUGH)
+ * - file attribute setting, based on fileMode?
+ * - handle other flags? (eg FILE_FLAG_NO_BUFFERING/FILE_FLAG_WRITE_THROUGH)
*/
-int win32_open(const char* fileName, int fileFlags, ...)
+int
+win32_open(const char *fileName, int fileFlags,...)
{
- int fd;
- HANDLE h;
+ int fd;
+ HANDLE h;
SECURITY_ATTRIBUTES sa;
/* Check that we can handle the request */
- assert((fileFlags & ((O_RDONLY|O_WRONLY|O_RDWR) | O_APPEND |
- (O_RANDOM|O_SEQUENTIAL|O_TEMPORARY) |
- _O_SHORT_LIVED |
- (O_CREAT|O_TRUNC|O_EXCL) | (O_TEXT|O_BINARY))) == fileFlags);
+ assert((fileFlags & ((O_RDONLY | O_WRONLY | O_RDWR) | O_APPEND |
+ (O_RANDOM | O_SEQUENTIAL | O_TEMPORARY) |
+ _O_SHORT_LIVED |
+ (O_CREAT | O_TRUNC | O_EXCL) | (O_TEXT | O_BINARY))) == fileFlags);
- sa.nLength=sizeof(sa);
- sa.bInheritHandle=TRUE;
- sa.lpSecurityDescriptor=NULL;
+ sa.nLength = sizeof(sa);
+ sa.bInheritHandle = TRUE;
+ sa.lpSecurityDescriptor = NULL;
if ((h = CreateFile(fileName,
- /* cannot use O_RDONLY, as it == 0 */
- (fileFlags & O_RDWR) ? (GENERIC_WRITE | GENERIC_READ) :
- ((fileFlags & O_WRONLY) ? GENERIC_WRITE : GENERIC_READ),
- (FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE),
+ /* cannot use O_RDONLY, as it == 0 */
+ (fileFlags & O_RDWR) ? (GENERIC_WRITE | GENERIC_READ) :
+ ((fileFlags & O_WRONLY) ? GENERIC_WRITE : GENERIC_READ),
+ (FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE),
&sa,
openFlagsToCreateFileFlags(fileFlags),
- FILE_ATTRIBUTE_NORMAL |
- ((fileFlags & O_RANDOM) ? FILE_FLAG_RANDOM_ACCESS : 0) |
- ((fileFlags & O_SEQUENTIAL) ? FILE_FLAG_SEQUENTIAL_SCAN : 0) |
- ((fileFlags & _O_SHORT_LIVED) ? FILE_ATTRIBUTE_TEMPORARY : 0) |
- ((fileFlags & O_TEMPORARY) ? FILE_FLAG_DELETE_ON_CLOSE : 0),
+ FILE_ATTRIBUTE_NORMAL |
+ ((fileFlags & O_RANDOM) ? FILE_FLAG_RANDOM_ACCESS : 0) |
+ ((fileFlags & O_SEQUENTIAL) ? FILE_FLAG_SEQUENTIAL_SCAN : 0) |
+ ((fileFlags & _O_SHORT_LIVED) ? FILE_ATTRIBUTE_TEMPORARY : 0) |
+ ((fileFlags & O_TEMPORARY) ? FILE_FLAG_DELETE_ON_CLOSE : 0),
NULL)) == INVALID_HANDLE_VALUE)
{
switch (GetLastError())
{
- /* EMFILE, ENFILE should not occur from CreateFile. */
+ /* EMFILE, ENFILE should not occur from CreateFile. */
case ERROR_PATH_NOT_FOUND:
- case ERROR_FILE_NOT_FOUND: errno = ENOENT; break;
- case ERROR_FILE_EXISTS: errno = EEXIST; break;
- case ERROR_ACCESS_DENIED: errno = EACCES; break;
+ case ERROR_FILE_NOT_FOUND:
+ errno = ENOENT;
+ break;
+ case ERROR_FILE_EXISTS:
+ errno = EEXIST;
+ break;
+ case ERROR_ACCESS_DENIED:
+ errno = EACCES;
+ break;
default:
errno = EINVAL;
}
}
/* _open_osfhandle will, on error, set errno accordingly */
- if ((fd = _open_osfhandle((long)h,fileFlags&O_APPEND)) < 0 ||
- (fileFlags&(O_TEXT|O_BINARY) && (_setmode(fd,fileFlags&(O_TEXT|O_BINARY)) < 0)))
- CloseHandle(h); /* will not affect errno */
+ if ((fd = _open_osfhandle((long) h, fileFlags & O_APPEND)) < 0 ||
+ (fileFlags & (O_TEXT | O_BINARY) && (_setmode(fd, fileFlags & (O_TEXT | O_BINARY)) < 0)))
+ CloseHandle(h); /* will not affect errno */
return fd;
}
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/path.c,v 1.32 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/path.c,v 1.33 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef WIN32
-#define IS_DIR_SEP(ch) ((ch) == '/')
+#define IS_DIR_SEP(ch) ((ch) == '/')
#else
-#define IS_DIR_SEP(ch) ((ch) == '/' || (ch) == '\\')
+#define IS_DIR_SEP(ch) ((ch) == '/' || (ch) == '\\')
#endif
#ifndef WIN32
-#define IS_PATH_SEP(ch) ((ch) == ':')
+#define IS_PATH_SEP(ch) ((ch) == ':')
#else
-#define IS_PATH_SEP(ch) ((ch) == ';')
+#define IS_PATH_SEP(ch) ((ch) == ';')
#endif
const static char *relative_path(const char *bin_path, const char *other_path);
{
char *p;
- for (p = (char *)filename; *p; p++)
+ for (p = (char *) filename; *p; p++)
if (IS_DIR_SEP(*p))
return p;
return NULL;
{
char *p;
- for (p = (char *)filename; *p; p++)
+ for (p = (char *) filename; *p; p++)
if (IS_PATH_SEP(*p))
return p;
return NULL;
char *
last_dir_separator(const char *filename)
{
- char *p, *ret = NULL;
+ char *p,
+ *ret = NULL;
- for (p = (char *)filename; *p; p++)
+ for (p = (char *) filename; *p; p++)
if (IS_DIR_SEP(*p))
ret = p;
return ret;
make_native_path(char *filename)
{
#ifdef WIN32
- char *p;
-
+ char *p;
+
for (p = filename; *p; p++)
if (*p == '/')
*p = '\\';
canonicalize_path(char *path)
{
#ifdef WIN32
+
/*
* The Windows command processor will accept suitably quoted paths
* with forward slashes, but barfs badly with mixed forward and back
*p = '/';
}
- /* In Win32, if you do:
- * prog.exe "a b" "\c\d\"
- * the system will pass \c\d" as argv[2].
+ /*
+ * In Win32, if you do: prog.exe "a b" "\c\d\" the system will pass
+ * \c\d" as argv[2].
*/
- if (p > path && *(p-1) == '"')
- *(p-1) = '/';
+ if (p > path && *(p - 1) == '"')
+ *(p - 1) = '/';
#endif
/*
- * Removing the trailing slash on a path means we never get ugly double
- * slashes. Also, Win32 can't stat() a directory with a trailing slash.
- * Don't remove a leading slash, though.
+ * Removing the trailing slash on a path means we never get ugly
+ * double slashes. Also, Win32 can't stat() a directory with a
+ * trailing slash. Don't remove a leading slash, though.
*/
trim_trailing_separator(path);
*/
for (;;)
{
- int len = strlen(path);
+ int len = strlen(path);
if (len >= 2 && strcmp(path + len - 2, "/.") == 0)
{
get_share_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, PGSHAREDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_etc_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, SYSCONFDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_include_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, INCLUDEDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_pkginclude_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, PKGINCLUDEDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_includeserver_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, INCLUDEDIRSERVER)))
make_relative(my_exec_path, p, ret_path);
else
get_lib_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, LIBDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_pkglib_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, PKGLIBDIR)))
make_relative(my_exec_path, p, ret_path);
else
get_locale_path(const char *my_exec_path, char *ret_path)
{
const char *p;
-
+
if ((p = relative_path(PGBINDIR, LOCALEDIR)))
make_relative(my_exec_path, p, ret_path);
else
void
set_pglocale_pgservice(const char *argv0, const char *app)
{
- char path[MAXPGPATH];
- char my_exec_path[MAXPGPATH];
- char env_path[MAXPGPATH + sizeof("PGSYSCONFDIR=")]; /* longer than PGLOCALEDIR */
+ char path[MAXPGPATH];
+ char my_exec_path[MAXPGPATH];
+ char env_path[MAXPGPATH + sizeof("PGSYSCONFDIR=")]; /* longer than
+ * PGLOCALEDIR */
/* don't set LC_ALL in the backend */
if (strcmp(app, "postgres") != 0)
if (find_my_exec(argv0, my_exec_path) < 0)
return;
-
+
#ifdef ENABLE_NLS
get_locale_path(my_exec_path, path);
bindtextdomain(app, path);
if (getenv("PGSYSCONFDIR") == NULL)
{
get_etc_path(my_exec_path, path);
-
+
/* set for libpq to use */
snprintf(env_path, sizeof(env_path), "PGSYSCONFDIR=%s", path);
canonicalize_path(env_path + 13);
static void
make_relative(const char *my_exec_path, const char *p, char *ret_path)
{
- char path[MAXPGPATH];
+ char path[MAXPGPATH];
StrNCpy(path, my_exec_path, MAXPGPATH);
trim_directory(path);
relative_path(const char *bin_path, const char *other_path)
{
const char *other_sep = other_path;
-
+
#ifdef WIN32
	/* Drive letters match? */
if (isalpha(*bin_path) && bin_path[1] == ':' &&
#ifndef WIN32
*bin_path != *other_path
#else
- toupper((unsigned char) *bin_path) != toupper((unsigned char)*other_path)
+ toupper((unsigned char) *bin_path) != toupper((unsigned char) *other_path)
#endif
)
break;
if (IS_DIR_SEP(*other_path))
- other_sep = other_path + 1; /* past separator */
-
+ other_sep = other_path + 1; /* past separator */
+
bin_path++;
other_path++;
}
if (!*bin_path && !*other_path)
return NULL;
- /* advance past directory name */
+ /* advance past directory name */
while (!IS_DIR_SEP(*bin_path) && *bin_path)
bin_path++;
static void
trim_directory(char *path)
{
- char *p;
-
+ char *p;
+
if (path[0] == '\0')
return;
static void
trim_trailing_separator(char *path)
{
- char *p = path + strlen(path);
+ char *p = path + strlen(path);
#ifdef WIN32
+
/*
- * Skip over network and drive specifiers for win32.
- * Set 'path' to point to the last character we must keep.
+ * Skip over network and drive specifiers for win32. Set 'path' to
+ * point to the last character we must keep.
*/
- if (strlen(path) >= 2)
- {
- if (IS_DIR_SEP(path[0]) && IS_DIR_SEP(path[1]))
- {
- path += 2;
+ if (strlen(path) >= 2)
+ {
+ if (IS_DIR_SEP(path[0]) && IS_DIR_SEP(path[1]))
+ {
+ path += 2;
while (*path && !IS_DIR_SEP(*path))
path++;
}
- else if (isalpha(path[0]) && path[1] == ':')
- {
- path++;
- if (IS_DIR_SEP(path[1]))
- path++;
- }
- }
+ else if (isalpha(path[0]) && path[1] == ':')
+ {
+ path++;
+ if (IS_DIR_SEP(path[1]))
+ path++;
+ }
+ }
#endif
/* trim off trailing slashes */
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/pgstrcasecmp.c,v 1.3 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/pgstrcasecmp.c,v 1.4 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
{
for (;;)
{
- unsigned char ch1 = (unsigned char) *s1++;
- unsigned char ch2 = (unsigned char) *s2++;
+ unsigned char ch1 = (unsigned char) *s1++;
+ unsigned char ch2 = (unsigned char) *s2++;
if (ch1 != ch2)
{
{
while (n-- > 0)
{
- unsigned char ch1 = (unsigned char) *s1++;
- unsigned char ch2 = (unsigned char) *s2++;
+ unsigned char ch1 = (unsigned char) *s1++;
+ unsigned char ch2 = (unsigned char) *s2++;
if (ch1 != ch2)
{
* must be replaced with recv/send.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/pipe.c,v 1.8 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/pipe.c,v 1.9 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if ((s = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to create socket: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to create socket: %ui", WSAGetLastError())));
return -1;
}
serv_addr.sin_family = AF_INET;
serv_addr.sin_port = htons(0);
serv_addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
+ if (bind(s, (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to bind: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to bind: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
if (listen(s, 1) == SOCKET_ERROR)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to listen: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to listen: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
- if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
+ if (getsockname(s, (SOCKADDR *) & serv_addr, &len) == SOCKET_ERROR)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to getsockname: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to getsockname: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
if ((handles[1] = socket(PF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to create socket 2: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to create socket 2: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
if (connect(handles[1], (SOCKADDR *) & serv_addr, len) == SOCKET_ERROR)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to connect socket: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to connect socket: %ui", WSAGetLastError())));
closesocket(s);
return -1;
}
if ((handles[0] = accept(s, (SOCKADDR *) & serv_addr, &len)) == INVALID_SOCKET)
{
- ereport(LOG,(errmsg_internal("pgpipe failed to accept socket: %ui",WSAGetLastError())));
+ ereport(LOG, (errmsg_internal("pgpipe failed to accept socket: %ui", WSAGetLastError())));
closesocket(handles[1]);
handles[1] = INVALID_SOCKET;
closesocket(s);
}
-int piperead(int s, char* buf, int len)
+int
+piperead(int s, char *buf, int len)
{
- int ret = recv(s,buf,len,0);
+ int ret = recv(s, buf, len, 0);
+
if (ret < 0 && WSAGetLastError() == WSAECONNRESET)
/* EOF on the pipe! (win32 socket based implementation) */
ret = 0;
return ret;
}
-#endif
+#endif
* of any kind. I shall in no event be liable for anything that happens
* to anyone/anything when using this software.
*/
-#define RAND48_SEED_0 (0x330e)
-#define RAND48_SEED_1 (0xabcd)
-#define RAND48_SEED_2 (0x1234)
-#define RAND48_MULT_0 (0xe66d)
-#define RAND48_MULT_1 (0xdeec)
-#define RAND48_MULT_2 (0x0005)
-#define RAND48_ADD (0x000b)
+#define RAND48_SEED_0 (0x330e)
+#define RAND48_SEED_1 (0xabcd)
+#define RAND48_SEED_2 (0x1234)
+#define RAND48_MULT_0 (0xe66d)
+#define RAND48_MULT_1 (0xdeec)
+#define RAND48_MULT_2 (0x0005)
+#define RAND48_ADD (0x000b)
unsigned short _rand48_seed[3] = {
RAND48_SEED_0,
unsigned short temp[2];
accu = (unsigned long) _rand48_mult[0] * (unsigned long) xseed[0] +
- (unsigned long) _rand48_add;
+ (unsigned long) _rand48_add;
temp[0] = (unsigned short) accu; /* lower 16 bits */
accu >>= sizeof(unsigned short) * 8;
accu += (unsigned long) _rand48_mult[0] * (unsigned long) xseed[1] +
- (unsigned long) _rand48_mult[1] * (unsigned long) xseed[0];
+ (unsigned long) _rand48_mult[1] * (unsigned long) xseed[0];
temp[1] = (unsigned short) accu; /* middle 16 bits */
accu >>= sizeof(unsigned short) * 8;
accu += _rand48_mult[0] * xseed[2] + _rand48_mult[1] * xseed[1] + _rand48_mult[2] * xseed[0];
lrand48(void)
{
_dorand48(_rand48_seed);
-	return ((long) _rand48_seed[2] << 15) + ((long) _rand48_seed[1] >>1);
+	return ((long) _rand48_seed[2] << 15) + ((long) _rand48_seed[1] >> 1);
}
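The limb arithmetic above is the classic rand48 linear congruential step, X' = (a*X + c) mod 2^48 with a = 0x5DEECE66D and c = 0xB, kept in three 16-bit pieces so the portable code does not have to assume a 64-bit integer type. The same generator written with one 64-bit word, as a sketch only; note that the top 31 bits, state >> 17, are exactly the (seed[2] << 15) + (seed[1] >> 1) combination lrand48() returns above.

#include <stdio.h>
#include <stdint.h>

static uint64_t rand48_state = UINT64_C(0x1234ABCD330E);	/* RAND48_SEED_2..0, packed high to low */

static long
lrand48_sketch(void)
{
	/* X' = (a*X + c) mod 2^48 */
	rand48_state = (rand48_state * UINT64_C(0x5DEECE66D) + 0xB) &
		((UINT64_C(1) << 48) - 1);
	return (long) (rand48_state >> 17);	/* high 31 bits of the new state */
}

int
main(void)
{
	int			i;

	for (i = 0; i < 3; i++)
		printf("%ld\n", lrand48_sketch());
	return 0;
}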
void
{
_rand48_seed[0] = RAND48_SEED_0;
_rand48_seed[1] = (unsigned short) seed;
-	_rand48_seed[2] = (unsigned short) (seed >>16);
+	_rand48_seed[2] = (unsigned short) (seed >> 16);
_rand48_mult[0] = RAND48_MULT_0;
_rand48_mult[1] = RAND48_MULT_1;
_rand48_mult[2] = RAND48_MULT_2;
#include "postgres_fe.h"
#ifdef ENABLE_THREAD_SAFETY
-#error The replacement snprintf() is not thread-safe. \
+#error The replacement snprintf() is not thread-safe. \
Your platform must have a thread-safe snprintf() to compile with threads.
#endif
 * causing nasty effects.
**************************************************************/
-/*static char _id[] = "$PostgreSQL: pgsql/src/port/snprintf.c,v 1.3 2004/01/08 17:15:54 momjian Exp $";*/
+/*static char _id[] = "$PostgreSQL: pgsql/src/port/snprintf.c,v 1.4 2004/08/29 05:07:02 momjian Exp $";*/
static char *end;
static int SnprfOverflow;
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.7 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/sprompt.c,v 1.8 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#else
#ifdef WIN32
HANDLE t = NULL;
- LPDWORD t_orig = NULL;
+ LPDWORD t_orig = NULL;
#endif
#endif
*
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
- * $PostgreSQL: pgsql/src/port/thread.c,v 1.24 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/thread.c,v 1.25 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
* Additional confusion exists because many operating systems that
* use pthread_setspecific/pthread_getspecific() also have *_r versions
* of standard library functions for compatibility with operating systems
- * that require them. However, internally, these *_r functions merely
+ * that require them. However, internally, these *_r functions merely
* call the thread-safe standard library functions.
*
* For example, BSD/OS 4.3 uses Bind 8.2.3 for getpwuid(). Internally,
* getpwuid() calls pthread_setspecific/pthread_getspecific() to return
- * static data to the caller in a thread-safe manner. However, BSD/OS
+ * static data to the caller in a thread-safe manner. However, BSD/OS
* also has getpwuid_r(), which merely calls getpwuid() and shifts
* around the arguments to match the getpwuid_r() function declaration.
* Therefore, while BSD/OS has getpwuid_r(), it isn't required. It also
* Run src/tools/thread to see if your operating system has thread-safe
* non-*_r functions.
*/
-
+
/*
* Wrapper around strerror and strerror_r to use the former if it is
*/
#ifndef WIN32
int
-pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer,
- size_t buflen, struct passwd **result)
+pqGetpwuid(uid_t uid, struct passwd * resultbuf, char *buffer,
+ size_t buflen, struct passwd ** result)
{
#if defined(FRONTEND) && defined(ENABLE_THREAD_SAFETY) && defined(HAVE_GETPWUID_R)
/* POSIX version */
getpwuid_r(uid, resultbuf, buffer, buflen, result);
#else
+
/*
* Early POSIX draft of getpwuid_r() returns 'struct passwd *'.
- * getpwuid_r(uid, resultbuf, buffer, buflen)
+ * getpwuid_r(uid, resultbuf, buffer, buflen)
*/
*result = getpwuid_r(uid, resultbuf, buffer, buflen);
#endif
#ifndef HAVE_GETADDRINFO
int
pqGethostbyname(const char *name,
- struct hostent *resultbuf,
+ struct hostent * resultbuf,
char *buffer, size_t buflen,
- struct hostent **result,
+ struct hostent ** result,
int *herrno)
{
#if defined(FRONTEND) && defined(ENABLE_THREAD_SAFETY) && defined(HAVE_GETHOSTBYNAME_R)
+
/*
* broken (well early POSIX draft) gethostbyname_r() which returns
* 'struct hostent *'
if (*result != NULL)
*herrno = h_errno;
-
+
if (*result != NULL)
return 0;
else
return -1;
#endif
}
+
#endif
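pqGetpwuid() above hides the divergent getpwuid_r() flavors behind a single interface in which the caller supplies both a struct passwd and a scratch buffer. A minimal frontend-style usage sketch, assuming the program is linked against src/port/thread.c; the declaration is repeated here only so the example stands alone, and error handling is reduced to checking the returned pointer:

#include <sys/types.h>
#include <pwd.h>
#include <stdio.h>
#include <unistd.h>

extern int pqGetpwuid(uid_t uid, struct passwd *resultbuf, char *buffer,
					  size_t buflen, struct passwd **result);

int
main(void)
{
	struct passwd pwbuf;
	struct passwd *pw = NULL;
	char		buf[1024];		/* scratch space for the string fields */

	pqGetpwuid(getuid(), &pwbuf, buf, sizeof(buf), &pw);
	if (pw)
		printf("current user: %s\n", pw->pw_name);
	return 0;
}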
*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.2 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/port/unsetenv.c,v 1.3 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
void
unsetenv(const char *name)
{
- char *envstr;
+ char *envstr;
if (getenv(name) == NULL)
return; /* no work */
/*
- * The technique embodied here works if libc follows the Single Unix Spec
- * and actually uses the storage passed to putenv() to hold the environ
- * entry. When we clobber the entry in the second step we are ensuring
- * that we zap the actual environ member. However, there are some libc
- * implementations (notably recent BSDs) that do not obey SUS but copy
- * the presented string. This method fails on such platforms. Hopefully
- * all such platforms have unsetenv() and thus won't be using this hack.
+ * The technique embodied here works if libc follows the Single Unix
+ * Spec and actually uses the storage passed to putenv() to hold the
+ * environ entry. When we clobber the entry in the second step we are
+ * ensuring that we zap the actual environ member. However, there are
+ * some libc implementations (notably recent BSDs) that do not obey
+ * SUS but copy the presented string. This method fails on such
+ * platforms. Hopefully all such platforms have unsetenv() and thus
+ * won't be using this hack.
*
- * Note that repeatedly setting and unsetting a var using this code
- * will leak memory.
+ * Note that repeatedly setting and unsetting a var using this code will
+ * leak memory.
*/
envstr = (char *) malloc(strlen(name) + 2);
strcpy(envstr, "=");
/*
- * This last putenv cleans up if we have multiple zero-length names
- * as a result of unsetting multiple things.
+ * This last putenv cleans up if we have multiple zero-length names as
+ * a result of unsetting multiple things.
*/
putenv(envstr);
}
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.7 2004/06/03 02:08:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/localtime.c,v 1.8 2004/08/29 05:07:02 momjian Exp $
*/
/*
static void gmtsub(const pg_time_t *timep, long offset, struct pg_tm * tmp);
static void localsub(const pg_time_t *timep, long offset, struct pg_tm * tmp);
static void timesub(const pg_time_t *timep, long offset,
- const struct state * sp, struct pg_tm * tmp);
+ const struct state * sp, struct pg_tm * tmp);
static pg_time_t transtime(pg_time_t janfirst, int year,
- const struct rule * rulep, long offset);
+ const struct rule * rulep, long offset);
static int tzload(const char *name, struct state * sp);
static int tzparse(const char *name, struct state * sp, int lastditch);
/*
* `HOURSPERDAY * DAYSPERWEEK - 1' allows quasi-Posix rules like
- * "M10.4.6/26", which does not conform to Posix, but which
- * specifies the equivalent of ``02:00 on the first Sunday on or
- * after 23 Oct''.
+ * "M10.4.6/26", which does not conform to Posix, but which specifies
+ * the equivalent of ``02:00 on the first Sunday on or after 23 Oct''.
*/
strp = getnum(strp, &num, 0, HOURSPERDAY * DAYSPERWEEK - 1);
if (strp == NULL)
case JULIAN_DAY:
/*
- * Jn - Julian day, 1 == January 1, 60 == March 1 even in
- * leap years. In non-leap years, or if the day number is
- * 59 or less, just add SECSPERDAY times the day number-1 to
- * the time of January 1, midnight, to get the day.
+ * Jn - Julian day, 1 == January 1, 60 == March 1 even in leap
+ * years. In non-leap years, or if the day number is 59 or
+ * less, just add SECSPERDAY times the day number-1 to the
+ * time of January 1, midnight, to get the day.
*/
value = janfirst + (rulep->r_day - 1) * SECSPERDAY;
if (leapyear && rulep->r_day >= 60)
case DAY_OF_YEAR:
/*
- * n - day of year. Just add SECSPERDAY times the day
- * number to the time of January 1, midnight, to get the
- * day.
+ * n - day of year. Just add SECSPERDAY times the day number
+ * to the time of January 1, midnight, to get the day.
*/
value = janfirst + rulep->r_day * SECSPERDAY;
break;
value += mon_lengths[leapyear][i] * SECSPERDAY;
/*
- * Use Zeller's Congruence to get day-of-week of first day
- * of month.
+ * Use Zeller's Congruence to get day-of-week of first day of
+ * month.
*/
m1 = (rulep->r_mon + 9) % 12 + 1;
yy0 = (rulep->r_mon <= 2) ? (year - 1) : year;
dow += DAYSPERWEEK;
/*
- * "dow" is the day-of-week of the first day of the month.
- * Get the day-of-month (zero-origin) of the first "dow" day
- * of the month.
+ * "dow" is the day-of-week of the first day of the month. Get
+ * the day-of-month (zero-origin) of the first "dow" day of
+ * the month.
*/
d = rulep->r_day - dow;
if (d < 0)
struct rule end;
register int year;
register pg_time_t janfirst;
- pg_time_t starttime;
- pg_time_t endtime;
+ pg_time_t starttime;
+ pg_time_t endtime;
++name;
if ((name = getrule(name, &start)) == NULL)
theiroffset = theirstdoffset;
/*
- * Now juggle transition times and types tracking offsets
- * as you do.
+ * Now juggle transition times and types tracking offsets as
+ * you do.
*/
for (i = 0; i < sp->timecnt; ++i)
{
timesub(timep, offset, gmtptr, tmp);
/*
- * Could get fancy here and deliver something such as "UTC+xxxx"
- * or "UTC-xxxx" if offset is non-zero, but this is no time for a
+ * Could get fancy here and deliver something such as "UTC+xxxx" or
+ * "UTC-xxxx" if offset is non-zero, but this is no time for a
* treasure hunt.
*/
if (offset != 0)
register const struct state * sp, register struct pg_tm * tmp)
{
register const struct lsinfo *lp;
+
/* expand days to 64 bits to support full Julian-day range */
register int64 days;
register int idays;
if (tmp->tm_wday < 0)
tmp->tm_wday += DAYSPERWEEK;
y = EPOCH_YEAR;
+
/*
- * Note: the point of adding 4800 is to ensure we make the same assumptions
- * as Postgres' Julian-date routines about the placement of leap years
- * in centuries BC, at least back to 4713BC which is as far as we'll go.
- * This is effectively extending Gregorian timekeeping into pre-Gregorian
- * centuries, which is a tad bogus but it conforms to the SQL spec...
+ * Note: the point of adding 4800 is to ensure we make the same
+ * assumptions as Postgres' Julian-date routines about the placement
+ * of leap years in centuries BC, at least back to 4713BC which is as
+ * far as we'll go. This is effectively extending Gregorian
+ * timekeeping into pre-Gregorian centuries, which is a tad bogus but
+ * it conforms to the SQL spec...
*/
#define LEAPS_THRU_END_OF(y) (((y) + 4800) / 4 - ((y) + 4800) / 100 + ((y) + 4800) / 400)
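Since the macro just shifts the year by 4800 before applying the standard Gregorian rule, differences of LEAPS_THRU_END_OF() count leap days between year ends even for years BC. A small self-contained check (the macro is copied from the line above):

#include <assert.h>
#include <stdio.h>

#define LEAPS_THRU_END_OF(y) (((y) + 4800) / 4 - ((y) + 4800) / 100 + ((y) + 4800) / 400)

int
main(void)
{
	/* only 2000 is a leap year in (1999, 2003] */
	assert(LEAPS_THRU_END_OF(2003) - LEAPS_THRU_END_OF(1999) == 1);
	/* 1900 is not a leap year: divisible by 100 but not by 400 */
	assert(LEAPS_THRU_END_OF(1900) - LEAPS_THRU_END_OF(1899) == 0);
	printf("leap-day counts check out\n");
	return 0;
}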
while (days < 0 || days >= (int64) year_lengths[yleap = isleap(y)])
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.22 2004/08/29 04:13:12 momjian Exp $
+ * $PostgreSQL: pgsql/src/timezone/pgtz.c,v 1.23 2004/08/29 05:07:02 momjian Exp $
*
*-------------------------------------------------------------------------
*/
#ifndef WIN32
#define T_DAY ((time_t) (60*60*24))
-#define T_WEEK ((time_t) (60*60*24*7))
+#define T_WEEK ((time_t) (60*60*24*7))
#define T_MONTH ((time_t) (60*60*24*31))
-#define MAX_TEST_TIMES (52*100) /* 100 years, or 1904..2004 */
+#define MAX_TEST_TIMES (52*100) /* 100 years, or 1904..2004 */
struct tztry
{
};
static void scan_available_timezones(char *tzdir, char *tzdirsub,
- struct tztry *tt,
- int *bestscore, char *bestzonename);
+ struct tztry * tt,
+ int *bestscore, char *bestzonename);
/*
* Get GMT offset from a system struct tm
*/
static int
-get_timezone_offset(struct tm *tm)
+get_timezone_offset(struct tm * tm)
{
#if defined(HAVE_STRUCT_TM_TM_ZONE)
return tm->tm_gmtoff;
* Does a system tm value match one we computed ourselves?
*/
static bool
-compare_tm(struct tm *s, struct pg_tm *p)
+compare_tm(struct tm * s, struct pg_tm * p)
{
if (s->tm_sec != p->tm_sec ||
s->tm_min != p->tm_min ||
* test time.
*/
static int
-score_timezone(const char *tzname, struct tztry *tt)
+score_timezone(const char *tzname, struct tztry * tt)
{
int i;
pg_time_t pgtt;
- struct tm *systm;
- struct pg_tm *pgtm;
+ struct tm *systm;
+ struct pg_tm *pgtm;
char cbuf[TZ_STRLEN_MAX + 1];
if (!pg_tzset(tzname))
pgtt = (pg_time_t) (tt->test_times[i]);
pgtm = pg_localtime(&pgtt);
if (!pgtm)
- return -1; /* probably shouldn't happen */
+ return -1; /* probably shouldn't happen */
systm = localtime(&(tt->test_times[i]));
if (!systm)
{
if (pgtm->tm_zone == NULL)
return -1; /* probably shouldn't happen */
memset(cbuf, 0, sizeof(cbuf));
- strftime(cbuf, sizeof(cbuf) - 1, "%Z", systm); /* zone abbr */
+ strftime(cbuf, sizeof(cbuf) - 1, "%Z", systm); /* zone abbr */
if (strcmp(cbuf, pgtm->tm_zone) != 0)
{
elog(DEBUG4, "TZ \"%s\" scores %d: at %ld \"%s\" versus \"%s\"",
/*
* Set up the list of dates to be probed to see how well our timezone
* matches the system zone. We first probe January and July of 2004;
- * this serves to quickly eliminate the vast majority of the TZ database
- * entries. If those dates match, we probe every week from 2004 backwards
- * to late 1904. (Weekly resolution is good enough to identify DST
- * transition rules, since everybody switches on Sundays.) The further
- * back the zone matches, the better we score it. This may seem like
- * a rather random way of doing things, but experience has shown that
- * system-supplied timezone definitions are likely to have DST behavior
- * that is right for the recent past and not so accurate further back.
- * Scoring in this way allows us to recognize zones that have some
- * commonality with the zic database, without insisting on exact match.
- * (Note: we probe Thursdays, not Sundays, to avoid triggering
- * DST-transition bugs in localtime itself.)
+ * this serves to quickly eliminate the vast majority of the TZ
+ * database entries. If those dates match, we probe every week from
+ * 2004 backwards to late 1904. (Weekly resolution is good enough to
+ * identify DST transition rules, since everybody switches on
+ * Sundays.) The further back the zone matches, the better we score
+ * it. This may seem like a rather random way of doing things, but
+ * experience has shown that system-supplied timezone definitions are
+ * likely to have DST behavior that is right for the recent past and
+ * not so accurate further back. Scoring in this way allows us to
+ * recognize zones that have some commonality with the zic database,
+ * without insisting on exact match. (Note: we probe Thursdays, not
+ * Sundays, to avoid triggering DST-transition bugs in localtime
+ * itself.)
*/
tt.n_test_times = 0;
tt.test_times[tt.n_test_times++] = build_time_t(2004, 1, 15);
return resultbuf;
/*
- * Couldn't find a match in the database, so next we try constructed zone
- * names (like "PST8PDT").
+ * Couldn't find a match in the database, so next we try constructed
+ * zone names (like "PST8PDT").
*
- * First we need to determine the names of the local standard and daylight
- * zones. The idea here is to scan forward from today until we have
- * seen both zones, if both are in use.
+ * First we need to determine the names of the local standard and
+ * daylight zones. The idea here is to scan forward from today until
+ * we have seen both zones, if both are in use.
*/
memset(std_zone_name, 0, sizeof(std_zone_name));
memset(dst_zone_name, 0, sizeof(dst_zone_name));
return resultbuf;
/*
- * Did not find the timezone. Fallback to use a GMT zone. Note that the
- * zic timezone database names the GMT-offset zones in POSIX style: plus
- * is west of Greenwich. It's unfortunate that this is opposite of SQL
- * conventions. Should we therefore change the names? Probably not...
+ * Did not find the timezone. Fallback to use a GMT zone. Note that
+ * the zic timezone database names the GMT-offset zones in POSIX
+ * style: plus is west of Greenwich. It's unfortunate that this is
+ * opposite of SQL conventions. Should we therefore change the names?
+ * Probably not...
*/
snprintf(resultbuf, sizeof(resultbuf), "Etc/GMT%s%d",
- (-std_ofs > 0) ? "+" : "", -std_ofs / 3600);
+ (-std_ofs > 0) ? "+" : "", -std_ofs / 3600);
ereport(LOG,
- (errmsg("could not recognize system timezone, defaulting to \"%s\"",
- resultbuf),
- errhint("You can specify the correct timezone in postgresql.conf.")));
+ (errmsg("could not recognize system timezone, defaulting to \"%s\"",
+ resultbuf),
+ errhint("You can specify the correct timezone in postgresql.conf.")));
return resultbuf;
}
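The probe-time comment above amounts to a simple schedule: two spot checks in 2004 to reject most zones quickly, then one probe per week for roughly a century going backwards. A rough, self-contained sketch of building such a list; build_time_sketch(), the local-noon anchor, and plain time_t week arithmetic are simplifications standing in for the real build_time_t() and T_WEEK machinery:

#include <stdio.h>
#include <time.h>

#define WEEK_SECONDS	((time_t) (60 * 60 * 24 * 7))
#define MAX_PROBES		(52 * 100)	/* about 100 years of weekly probes */

static time_t probe_times[MAX_PROBES];
static int	n_probe_times = 0;

static time_t
build_time_sketch(int year, int month, int day)
{
	struct tm	tm = {0};

	tm.tm_year = year - 1900;
	tm.tm_mon = month - 1;
	tm.tm_mday = day;
	tm.tm_hour = 12;			/* local noon, away from DST switches */
	tm.tm_isdst = -1;			/* let mktime() work out DST */
	return mktime(&tm);
}

static void
fill_probe_times(void)
{
	time_t		t;

	n_probe_times = 0;
	/* Jan 15 and Jul 15 2004 are Thursdays, like the real probes */
	probe_times[n_probe_times++] = build_time_sketch(2004, 1, 15);
	probe_times[n_probe_times++] = build_time_sketch(2004, 7, 15);
	/* whole-week steps keep every later probe on a Thursday too */
	for (t = probe_times[1] - WEEK_SECONDS; n_probe_times < MAX_PROBES;
		 t -= WEEK_SECONDS)
		probe_times[n_probe_times++] = t;
}

int
main(void)
{
	fill_probe_times();
	printf("built %d probe times, earliest %ld\n",
		   n_probe_times, (long) probe_times[n_probe_times - 1]);
	return 0;
}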
* Recursively scan the timezone database looking for the best match to
* the system timezone behavior.
*
- * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
+ * tzdir points to a buffer of size MAXPGPATH. On entry, it holds the
* pathname of a directory containing TZ files. We internally modify it
* to hold pathnames of sub-directories and files, but must restore it
* to its original contents before exit.
* score. bestzonename must be a buffer of length TZ_STRLEN_MAX + 1.
*/
static void
-scan_available_timezones(char *tzdir, char *tzdirsub, struct tztry *tt,
+scan_available_timezones(char *tzdir, char *tzdirsub, struct tztry * tt,
int *bestscore, char *bestzonename)
{
int tzdir_orig_len = strlen(tzdir);
else
{
/* Load and test this file */
- int score = score_timezone(tzdirsub, tt);
+ int score = score_timezone(tzdirsub, tt);
if (score > *bestscore)
{
tzdir[tzdir_orig_len] = '\0';
}
-#else /* WIN32 */
+#else /* WIN32 */
-static const struct {
- const char *stdname; /* Windows name of standard timezone */
- const char *dstname; /* Windows name of daylight timezone */
- const char *pgtzname; /* Name of pgsql timezone to map to */
-} win32_tzmap[] = {
+static const struct
+{
+ const char *stdname; /* Windows name of standard timezone */
+ const char *dstname; /* Windows name of daylight timezone */
+ const char *pgtzname; /* Name of pgsql timezone to map to */
+} win32_tzmap[] =
+
+{
/*
* This list was built from the contents of the registry at
- * HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones
- * on Windows XP Professional SP1
+ * HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows
+ * NT\CurrentVersion\Time Zones on Windows XP Professional SP1
*
* The zones have been matched to zic timezones by looking at the cities
- * listed in the win32 display name (in the comment here) in most cases.
+ * listed in the win32 display name (in the comment here) in most
+ * cases.
*/
- {"Afghanistan Standard Time", "Afghanistan Daylight Time",
- "Asia/Kabul"}, /* (GMT+04:30) Kabul */
- {"Alaskan Standard Time", "Alaskan Daylight Time",
- "US/Alaska"}, /* (GMT-09:00) Alaska */
- {"Arab Standard Time", "Arab Daylight Time",
- "Asia/Kuwait"}, /* (GMT+03:00) Kuwait, Riyadh */
- {"Arabian Standard Time", "Arabian Daylight Time",
- "Asia/Muscat"}, /* (GMT+04:00) Abu Dhabi, Muscat */
- {"Arabic Standard Time", "Arabic Daylight Time",
- "Asia/Baghdad"}, /* (GMT+03:00) Baghdad */
- {"Atlantic Standard Time", "Atlantic Daylight Time",
- "Canada/Atlantic"}, /* (GMT-04:00) Atlantic Time (Canada) */
- {"AUS Central Standard Time", "AUS Central Daylight Time",
- "Australia/Darwin"}, /* (GMT+09:30) Darwin */
- {"AUS Eastern Standard Time", "AUS Eastern Daylight Time",
- "Australia/Canberra"}, /* (GMT+10:00) Canberra, Melbourne, Sydney */
- {"Azores Standard Time", "Azores Daylight Time",
- "Atlantic/Azores"}, /* (GMT-01:00) Azores */
- {"Canada Central Standard Time", "Canada Central Daylight Time",
- "Canada/Saskatchewan"}, /* (GMT-06:00) Saskatchewan */
- {"Cape Verde Standard Time", "Cape Verde Daylight Time",
- "Atlantic/Cape_Verde"}, /* (GMT-01:00) Cape Verde Is. */
- {"Caucasus Standard Time", "Caucasus Daylight Time",
- "Asia/Baku"}, /* (GMT+04:00) Baku, Tbilisi, Yerevan */
- {"Cen. Australia Standard Time", "Cen. Australia Daylight Time",
- "Australia/Adelaide"}, /* (GMT+09:30) Adelaide */
- {"Central America Standard Time", "Central America Daylight Time",
- "CST6CDT"}, /* (GMT-06:00) Central America */
- {"Central Asia Standard Time", "Central Asia Daylight Time",
- "Asia/Dhaka"}, /* (GMT+06:00) Astana, Dhaka */
- {"Central Europe Standard Time", "Central Europe Daylight Time",
- "Europe/Belgrade"}, /* (GMT+01:00) Belgrade, Bratislava, Budapest, Ljubljana, Prague */
- {"Central European Standard Time", "Central European Daylight Time",
- "Europe/Sarajevo"}, /* (GMT+01:00) Sarajevo, Skopje, Warsaw, Zagreb */
- {"Central Pacific Standard Time", "Central Pacific Daylight Time",
- "Pacific/Noumea"}, /* (GMT+11:00) Magadan, Solomon Is., New Caledonia */
- {"Central Standard Time", "Central Daylight Time",
- "US/Central"}, /* (GMT-06:00) Central Time (US & Canada) */
- {"China Standard Time", "China Daylight Time",
- "Asia/Hong_Kong"}, /* (GMT+08:00) Beijing, Chongqing, Hong Kong, Urumqi */
- {"Dateline Standard Time", "Dateline Daylight Time",
- "Etc/GMT+12"}, /* (GMT-12:00) International Date Line West */
- {"E. Africa Standard Time", "E. Africa Daylight Time",
- "Africa/Nairobi"}, /* (GMT+03:00) Nairobi */
- {"E. Australia Standard Time", "E. Australia Daylight Time",
- "Australia/Brisbane"}, /* (GMT+10:00) Brisbane */
- {"E. Europe Standard Time", "E. Europe Daylight Time",
- "Europe/Bucharest"}, /* (GMT+02:00) Bucharest */
- {"E. South America Standard Time", "E. South America Daylight Time",
- "America/Araguaina"}, /* (GMT-03:00) Brasilia */
- {"Eastern Standard Time", "Eastern Daylight Time",
- "US/Eastern"}, /* (GMT-05:00) Eastern Time (US & Canada) */
- {"Egypt Standard Time", "Egypt Daylight Time",
- "Africa/Cairo"}, /* (GMT+02:00) Cairo */
- {"Ekaterinburg Standard Time", "Ekaterinburg Daylight Time",
- "Asia/Yekaterinburg"}, /* (GMT+05:00) Ekaterinburg */
- {"Fiji Standard Time", "Fiji Daylight Time",
- "Pacific/Fiji"}, /* (GMT+12:00) Fiji, Kamchatka, Marshall Is. */
- {"FLE Standard Time", "FLE Daylight Time",
- "Europe/Helsinki"}, /* (GMT+02:00) Helsinki, Kyiv, Riga, Sofia, Tallinn, Vilnius */
- {"GMT Standard Time", "GMT Daylight Time",
- "Europe/Dublin"}, /* (GMT) Greenwich Mean Time : Dublin, Edinburgh, Lisbon, London */
- {"Greenland Standard Time", "Greenland Daylight Time",
- "America/Godthab"}, /* (GMT-03:00) Greenland */
- {"Greenwich Standard Time", "Greenwich Daylight Time",
- "Africa/Casablanca"}, /* (GMT) Casablanca, Monrovia */
- {"GTB Standard Time", "GTB Daylight Time",
- "Europe/Athens"}, /* (GMT+02:00) Athens, Istanbul, Minsk */
- {"Hawaiian Standard Time", "Hawaiian Daylight Time",
- "US/Hawaii"}, /* (GMT-10:00) Hawaii */
- {"India Standard Time", "India Daylight Time",
- "Asia/Calcutta"}, /* (GMT+05:30) Chennai, Kolkata, Mumbai, New Delhi */
- {"Iran Standard Time", "Iran Daylight Time",
- "Asia/Tehran"}, /* (GMT+03:30) Tehran */
- {"Jerusalem Standard Time", "Jerusalem Daylight Time",
- "Asia/Jerusalem"}, /* (GMT+02:00) Jerusalem */
- {"Korea Standard Time", "Korea Daylight Time",
- "Asia/Seoul"}, /* (GMT+09:00) Seoul */
- {"Mexico Standard Time", "Mexico Daylight Time",
- "America/Mexico_City"}, /* (GMT-06:00) Guadalajara, Mexico City, Monterrey */
- {"Mexico Standard Time", "Mexico Daylight Time",
- "America/La_Paz"}, /* (GMT-07:00) Chihuahua, La Paz, Mazatlan */
- {"Mid-Atlantic Standard Time", "Mid-Atlantic Daylight Time",
- "Atlantic/South_Georgia"}, /* (GMT-02:00) Mid-Atlantic */
- {"Mountain Standard Time", "Mountain Daylight Time",
- "US/Mountain"}, /* (GMT-07:00) Mountain Time (US & Canada) */
- {"Myanmar Standard Time", "Myanmar Daylight Time",
- "Asia/Rangoon"}, /* (GMT+06:30) Rangoon */
- {"N. Central Asia Standard Time", "N. Central Asia Daylight Time",
- "Asia/Almaty"}, /* (GMT+06:00) Almaty, Novosibirsk */
- {"Nepal Standard Time", "Nepal Daylight Time",
- "Asia/Katmandu"}, /* (GMT+05:45) Kathmandu */
- {"New Zealand Standard Time", "New Zealand Daylight Time",
- "Pacific/Auckland"}, /* (GMT+12:00) Auckland, Wellington */
- {"Newfoundland Standard Time", "Newfoundland Daylight Time",
- "Canada/Newfoundland"}, /* (GMT-03:30) Newfoundland */
- {"North Asia East Standard Time", "North Asia East Daylight Time",
- "Asia/Irkutsk"}, /* (GMT+08:00) Irkutsk, Ulaan Bataar */
- {"North Asia Standard Time", "North Asia Daylight Time",
- "Asia/Krasnoyarsk"}, /* (GMT+07:00) Krasnoyarsk */
- {"Pacific SA Standard Time", "Pacific SA Daylight Time",
- "America/Santiago"}, /* (GMT-04:00) Santiago */
- {"Pacific Standard Time", "Pacific Daylight Time",
- "US/Pacific"}, /* (GMT-08:00) Pacific Time (US & Canada); Tijuana */
- {"Romance Standard Time", "Romance Daylight Time",
- "Europe/Brussels"}, /* (GMT+01:00) Brussels, Copenhagen, Madrid, Paris */
- {"Russian Standard Time", "Russian Daylight Time",
- "Europe/Moscow"}, /* (GMT+03:00) Moscow, St. Petersburg, Volgograd */
- {"SA Eastern Standard Time", "SA Eastern Daylight Time",
- "America/Buenos_Aires"}, /* (GMT-03:00) Buenos Aires, Georgetown */
- {"SA Pacific Standard Time", "SA Pacific Daylight Time",
- "America/Bogota"}, /* (GMT-05:00) Bogota, Lima, Quito */
- {"SA Western Standard Time", "SA Western Daylight Time",
- "America/Caracas"}, /* (GMT-04:00) Caracas, La Paz */
- {"Samoa Standard Time", "Samoa Daylight Time",
- "Pacific/Midway"}, /* (GMT-11:00) Midway Island, Samoa */
- {"SE Asia Standard Time", "SE Asia Daylight Time",
- "Asia/Bangkok"}, /* (GMT+07:00) Bangkok, Hanoi, Jakarta */
- {"Malay Peninsula Standard Time", "Malay Peninsula Daylight Time",
- "Asia/Kuala_Lumpur"}, /* (GMT+08:00) Kuala Lumpur, Singapore */
- {"South Africa Standard Time", "South Africa Daylight Time",
- "Africa/Harare"}, /* (GMT+02:00) Harare, Pretoria */
- {"Sri Lanka Standard Time", "Sri Lanka Daylight Time",
- "Asia/Colombo"}, /* (GMT+06:00) Sri Jayawardenepura */
- {"Taipei Standard Time", "Taipei Daylight Time",
- "Asia/Taipei"}, /* (GMT+08:00) Taipei */
- {"Tasmania Standard Time", "Tasmania Daylight Time",
- "Australia/Hobart"}, /* (GMT+10:00) Hobart */
- {"Tokyo Standard Time", "Tokyo Daylight Time",
- "Asia/Tokyo"}, /* (GMT+09:00) Osaka, Sapporo, Tokyo */
- {"Tonga Standard Time", "Tonga Daylight Time",
- "Pacific/Tongatapu"}, /* (GMT+13:00) Nuku'alofa */
- {"US Eastern Standard Time", "US Eastern Daylight Time",
- "US/Eastern"}, /* (GMT-05:00) Indiana (East) */
- {"US Mountain Standard Time", "US Mountain Daylight Time",
- "US/Arizona"}, /* (GMT-07:00) Arizona */
- {"Vladivostok Standard Time", "Vladivostok Daylight Time",
- "Asia/Vladivostok"}, /* (GMT+10:00) Vladivostok */
- {"W. Australia Standard Time", "W. Australia Daylight Time",
- "Australia/Perth"}, /* (GMT+08:00) Perth */
+ {
+ "Afghanistan Standard Time", "Afghanistan Daylight Time",
+ "Asia/Kabul"
+ }, /* (GMT+04:30) Kabul */
+ {
+ "Alaskan Standard Time", "Alaskan Daylight Time",
+ "US/Alaska"
+ }, /* (GMT-09:00) Alaska */
+ {
+ "Arab Standard Time", "Arab Daylight Time",
+ "Asia/Kuwait"
+ }, /* (GMT+03:00) Kuwait, Riyadh */
+ {
+ "Arabian Standard Time", "Arabian Daylight Time",
+ "Asia/Muscat"
+ }, /* (GMT+04:00) Abu Dhabi, Muscat */
+ {
+ "Arabic Standard Time", "Arabic Daylight Time",
+ "Asia/Baghdad"
+ }, /* (GMT+03:00) Baghdad */
+ {
+ "Atlantic Standard Time", "Atlantic Daylight Time",
+ "Canada/Atlantic"
+ }, /* (GMT-04:00) Atlantic Time (Canada) */
+ {
+ "AUS Central Standard Time", "AUS Central Daylight Time",
+ "Australia/Darwin"
+ }, /* (GMT+09:30) Darwin */
+ {
+ "AUS Eastern Standard Time", "AUS Eastern Daylight Time",
+ "Australia/Canberra"
+ }, /* (GMT+10:00) Canberra, Melbourne, Sydney */
+ {
+ "Azores Standard Time", "Azores Daylight Time",
+ "Atlantic/Azores"
+ }, /* (GMT-01:00) Azores */
+ {
+ "Canada Central Standard Time", "Canada Central Daylight Time",
+ "Canada/Saskatchewan"
+ }, /* (GMT-06:00) Saskatchewan */
+ {
+ "Cape Verde Standard Time", "Cape Verde Daylight Time",
+ "Atlantic/Cape_Verde"
+ }, /* (GMT-01:00) Cape Verde Is. */
+ {
+ "Caucasus Standard Time", "Caucasus Daylight Time",
+ "Asia/Baku"
+ }, /* (GMT+04:00) Baku, Tbilisi, Yerevan */
+ {
+ "Cen. Australia Standard Time", "Cen. Australia Daylight Time",
+ "Australia/Adelaide"
+ }, /* (GMT+09:30) Adelaide */
+ {
+ "Central America Standard Time", "Central America Daylight Time",
+ "CST6CDT"
+ }, /* (GMT-06:00) Central America */
+ {
+ "Central Asia Standard Time", "Central Asia Daylight Time",
+ "Asia/Dhaka"
+ }, /* (GMT+06:00) Astana, Dhaka */
+ {
+ "Central Europe Standard Time", "Central Europe Daylight Time",
+ "Europe/Belgrade"
+ }, /* (GMT+01:00) Belgrade, Bratislava,
+ * Budapest, Ljubljana, Prague */
+ {
+ "Central European Standard Time", "Central European Daylight Time",
+ "Europe/Sarajevo"
+ }, /* (GMT+01:00) Sarajevo, Skopje, Warsaw,
+ * Zagreb */
+ {
+ "Central Pacific Standard Time", "Central Pacific Daylight Time",
+ "Pacific/Noumea"
+ }, /* (GMT+11:00) Magadan, Solomon Is., New
+ * Caledonia */
+ {
+ "Central Standard Time", "Central Daylight Time",
+ "US/Central"
+ }, /* (GMT-06:00) Central Time (US & Canada) */
+ {
+ "China Standard Time", "China Daylight Time",
+ "Asia/Hong_Kong"
+ }, /* (GMT+08:00) Beijing, Chongqing, Hong
+ * Kong, Urumqi */
+ {
+ "Dateline Standard Time", "Dateline Daylight Time",
+ "Etc/GMT+12"
+ }, /* (GMT-12:00) International Date Line
+ * West */
+ {
+ "E. Africa Standard Time", "E. Africa Daylight Time",
+ "Africa/Nairobi"
+ }, /* (GMT+03:00) Nairobi */
+ {
+ "E. Australia Standard Time", "E. Australia Daylight Time",
+ "Australia/Brisbane"
+ }, /* (GMT+10:00) Brisbane */
+ {
+ "E. Europe Standard Time", "E. Europe Daylight Time",
+ "Europe/Bucharest"
+ }, /* (GMT+02:00) Bucharest */
+ {
+ "E. South America Standard Time", "E. South America Daylight Time",
+ "America/Araguaina"
+ }, /* (GMT-03:00) Brasilia */
+ {
+ "Eastern Standard Time", "Eastern Daylight Time",
+ "US/Eastern"
+ }, /* (GMT-05:00) Eastern Time (US & Canada) */
+ {
+ "Egypt Standard Time", "Egypt Daylight Time",
+ "Africa/Cairo"
+ }, /* (GMT+02:00) Cairo */
+ {
+ "Ekaterinburg Standard Time", "Ekaterinburg Daylight Time",
+ "Asia/Yekaterinburg"
+ }, /* (GMT+05:00) Ekaterinburg */
+ {
+ "Fiji Standard Time", "Fiji Daylight Time",
+ "Pacific/Fiji"
+ }, /* (GMT+12:00) Fiji, Kamchatka, Marshall
+ * Is. */
+ {
+ "FLE Standard Time", "FLE Daylight Time",
+ "Europe/Helsinki"
+ }, /* (GMT+02:00) Helsinki, Kyiv, Riga,
+ * Sofia, Tallinn, Vilnius */
+ {
+ "GMT Standard Time", "GMT Daylight Time",
+ "Europe/Dublin"
+ }, /* (GMT) Greenwich Mean Time : Dublin,
+ * Edinburgh, Lisbon, London */
+ {
+ "Greenland Standard Time", "Greenland Daylight Time",
+ "America/Godthab"
+ }, /* (GMT-03:00) Greenland */
+ {
+ "Greenwich Standard Time", "Greenwich Daylight Time",
+ "Africa/Casablanca"
+ }, /* (GMT) Casablanca, Monrovia */
+ {
+ "GTB Standard Time", "GTB Daylight Time",
+ "Europe/Athens"
+ }, /* (GMT+02:00) Athens, Istanbul, Minsk */
+ {
+ "Hawaiian Standard Time", "Hawaiian Daylight Time",
+ "US/Hawaii"
+ }, /* (GMT-10:00) Hawaii */
+ {
+ "India Standard Time", "India Daylight Time",
+ "Asia/Calcutta"
+ }, /* (GMT+05:30) Chennai, Kolkata, Mumbai,
+ * New Delhi */
+ {
+ "Iran Standard Time", "Iran Daylight Time",
+ "Asia/Tehran"
+ }, /* (GMT+03:30) Tehran */
+ {
+ "Jerusalem Standard Time", "Jerusalem Daylight Time",
+ "Asia/Jerusalem"
+ }, /* (GMT+02:00) Jerusalem */
+ {
+ "Korea Standard Time", "Korea Daylight Time",
+ "Asia/Seoul"
+ }, /* (GMT+09:00) Seoul */
+ {
+ "Mexico Standard Time", "Mexico Daylight Time",
+ "America/Mexico_City"
+ }, /* (GMT-06:00) Guadalajara, Mexico City,
+ * Monterrey */
+ {
+ "Mexico Standard Time", "Mexico Daylight Time",
+ "America/La_Paz"
+ }, /* (GMT-07:00) Chihuahua, La Paz, Mazatlan */
+ {
+ "Mid-Atlantic Standard Time", "Mid-Atlantic Daylight Time",
+ "Atlantic/South_Georgia"
+ }, /* (GMT-02:00) Mid-Atlantic */
+ {
+ "Mountain Standard Time", "Mountain Daylight Time",
+ "US/Mountain"
+ }, /* (GMT-07:00) Mountain Time (US & Canada) */
+ {
+ "Myanmar Standard Time", "Myanmar Daylight Time",
+ "Asia/Rangoon"
+ }, /* (GMT+06:30) Rangoon */
+ {
+ "N. Central Asia Standard Time", "N. Central Asia Daylight Time",
+ "Asia/Almaty"
+ }, /* (GMT+06:00) Almaty, Novosibirsk */
+ {
+ "Nepal Standard Time", "Nepal Daylight Time",
+ "Asia/Katmandu"
+ }, /* (GMT+05:45) Kathmandu */
+ {
+ "New Zealand Standard Time", "New Zealand Daylight Time",
+ "Pacific/Auckland"
+ }, /* (GMT+12:00) Auckland, Wellington */
+ {
+ "Newfoundland Standard Time", "Newfoundland Daylight Time",
+ "Canada/Newfoundland"
+ }, /* (GMT-03:30) Newfoundland */
+ {
+ "North Asia East Standard Time", "North Asia East Daylight Time",
+ "Asia/Irkutsk"
+ }, /* (GMT+08:00) Irkutsk, Ulaan Bataar */
+ {
+ "North Asia Standard Time", "North Asia Daylight Time",
+ "Asia/Krasnoyarsk"
+ }, /* (GMT+07:00) Krasnoyarsk */
+ {
+ "Pacific SA Standard Time", "Pacific SA Daylight Time",
+ "America/Santiago"
+ }, /* (GMT-04:00) Santiago */
+ {
+ "Pacific Standard Time", "Pacific Daylight Time",
+ "US/Pacific"
+ }, /* (GMT-08:00) Pacific Time (US & Canada);
+ * Tijuana */
+ {
+ "Romance Standard Time", "Romance Daylight Time",
+ "Europe/Brussels"
+ }, /* (GMT+01:00) Brussels, Copenhagen,
+ * Madrid, Paris */
+ {
+ "Russian Standard Time", "Russian Daylight Time",
+ "Europe/Moscow"
+ }, /* (GMT+03:00) Moscow, St. Petersburg,
+ * Volgograd */
+ {
+ "SA Eastern Standard Time", "SA Eastern Daylight Time",
+ "America/Buenos_Aires"
+ }, /* (GMT-03:00) Buenos Aires, Georgetown */
+ {
+ "SA Pacific Standard Time", "SA Pacific Daylight Time",
+ "America/Bogota"
+ }, /* (GMT-05:00) Bogota, Lima, Quito */
+ {
+ "SA Western Standard Time", "SA Western Daylight Time",
+ "America/Caracas"
+ }, /* (GMT-04:00) Caracas, La Paz */
+ {
+ "Samoa Standard Time", "Samoa Daylight Time",
+ "Pacific/Midway"
+ }, /* (GMT-11:00) Midway Island, Samoa */
+ {
+ "SE Asia Standard Time", "SE Asia Daylight Time",
+ "Asia/Bangkok"
+ }, /* (GMT+07:00) Bangkok, Hanoi, Jakarta */
+ {
+ "Malay Peninsula Standard Time", "Malay Peninsula Daylight Time",
+ "Asia/Kuala_Lumpur"
+ }, /* (GMT+08:00) Kuala Lumpur, Singapore */
+ {
+ "South Africa Standard Time", "South Africa Daylight Time",
+ "Africa/Harare"
+ }, /* (GMT+02:00) Harare, Pretoria */
+ {
+ "Sri Lanka Standard Time", "Sri Lanka Daylight Time",
+ "Asia/Colombo"
+ }, /* (GMT+06:00) Sri Jayawardenepura */
+ {
+ "Taipei Standard Time", "Taipei Daylight Time",
+ "Asia/Taipei"
+ }, /* (GMT+08:00) Taipei */
+ {
+ "Tasmania Standard Time", "Tasmania Daylight Time",
+ "Australia/Hobart"
+ }, /* (GMT+10:00) Hobart */
+ {
+ "Tokyo Standard Time", "Tokyo Daylight Time",
+ "Asia/Tokyo"
+ }, /* (GMT+09:00) Osaka, Sapporo, Tokyo */
+ {
+ "Tonga Standard Time", "Tonga Daylight Time",
+ "Pacific/Tongatapu"
+ }, /* (GMT+13:00) Nuku'alofa */
+ {
+ "US Eastern Standard Time", "US Eastern Daylight Time",
+ "US/Eastern"
+ }, /* (GMT-05:00) Indiana (East) */
+ {
+ "US Mountain Standard Time", "US Mountain Daylight Time",
+ "US/Arizona"
+ }, /* (GMT-07:00) Arizona */
+ {
+ "Vladivostok Standard Time", "Vladivostok Daylight Time",
+ "Asia/Vladivostok"
+ }, /* (GMT+10:00) Vladivostok */
+ {
+ "W. Australia Standard Time", "W. Australia Daylight Time",
+ "Australia/Perth"
+ }, /* (GMT+08:00) Perth */
/* {"W. Central Africa Standard Time", "W. Central Africa Daylight Time",
- ""}, Could not find a match for this one. Excluded for now. */ /* (GMT+01:00) West Central Africa */
- {"W. Europe Standard Time", "W. Europe Daylight Time",
- "CET"}, /* (GMT+01:00) Amsterdam, Berlin, Bern, Rome, Stockholm, Vienna */
- {"West Asia Standard Time", "West Asia Daylight Time",
- "Asia/Karachi"}, /* (GMT+05:00) Islamabad, Karachi, Tashkent */
- {"West Pacific Standard Time", "West Pacific Daylight Time",
- "Pacific/Guam"}, /* (GMT+10:00) Guam, Port Moresby */
- {"Yakutsk Standard Time", "Yakutsk Daylight Time",
- "Asia/Yakutsk"}, /* (GMT+09:00) Yakutsk */
- {NULL, NULL, NULL}
+ * ""}, Could not find a match for this one. Excluded for now. *//* (G
+ * MT+01:00) West Central Africa */
+ {
+ "W. Europe Standard Time", "W. Europe Daylight Time",
+ "CET"
+ }, /* (GMT+01:00) Amsterdam, Berlin, Bern,
+ * Rome, Stockholm, Vienna */
+ {
+ "West Asia Standard Time", "West Asia Daylight Time",
+ "Asia/Karachi"
+ }, /* (GMT+05:00) Islamabad, Karachi,
+ * Tashkent */
+ {
+ "West Pacific Standard Time", "West Pacific Daylight Time",
+ "Pacific/Guam"
+ }, /* (GMT+10:00) Guam, Port Moresby */
+ {
+ "Yakutsk Standard Time", "Yakutsk Daylight Time",
+ "Asia/Yakutsk"
+ }, /* (GMT+09:00) Yakutsk */
+ {
+ NULL, NULL, NULL
+ }
};
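Each entry in the table above pairs the Windows display names for a zone (standard and daylight forms) with a zoneinfo-style name, and identify_system_timezone() below simply asks strftime() for the current "%Z" string and scans the table for it. A minimal standalone sketch of that lookup, with a hypothetical two-entry table and a hypothetical lookup_tz() helper (on non-Windows systems "%Z" yields an abbreviation, so this probe will usually report no match):

	#include <stdio.h>
	#include <string.h>
	#include <time.h>

	struct tz_pair
	{
		const char *stdname;		/* Windows display name, standard time */
		const char *dstname;		/* Windows display name, daylight time */
		const char *pgtzname;		/* zoneinfo-style zone name */
	};

	/* hypothetical two-entry table, for illustration only */
	static const struct tz_pair tzmap[] = {
		{"Pacific Standard Time", "Pacific Daylight Time", "US/Pacific"},
		{"Tokyo Standard Time", "Tokyo Daylight Time", "Asia/Tokyo"},
		{NULL, NULL, NULL}
	};

	static const char *
	lookup_tz(const char *winname)
	{
		int			i;

		for (i = 0; tzmap[i].stdname != NULL; i++)
			if (strcmp(winname, tzmap[i].stdname) == 0 ||
				strcmp(winname, tzmap[i].dstname) == 0)
				return tzmap[i].pgtzname;
		return NULL;				/* unknown name; caller must fall back */
	}

	int
	main(void)
	{
		char		name[128];
		time_t		now = time(NULL);
		const char *zone;

		memset(name, 0, sizeof(name));
		strftime(name, sizeof(name) - 1, "%Z", localtime(&now));
		zone = lookup_tz(name);
		printf("\"%s\" -> %s\n", name, zone ? zone : "(no match)");
		return 0;
	}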
static const char *
identify_system_timezone(void)
{
- int i;
- char tzname[128];
- time_t t = time(NULL);
- struct tm *tm = localtime(&t);
+ int i;
+ char tzname[128];
+ time_t t = time(NULL);
+ struct tm *tm = localtime(&t);
if (!tm)
{
}
memset(tzname, 0, sizeof(tzname));
- strftime(tzname, sizeof(tzname)-1, "%Z", tm);
+ strftime(tzname, sizeof(tzname) - 1, "%Z", tm);
- for (i=0; win32_tzmap[i].stdname != NULL; i++)
+ for (i = 0; win32_tzmap[i].stdname != NULL; i++)
{
if (strcmp(tzname, win32_tzmap[i].stdname) == 0 ||
strcmp(tzname, win32_tzmap[i].dstname) == 0)
tzname)));
return NULL;
}
-
-#endif /* WIN32 */
+#endif /* WIN32 */
/*
const char *
select_default_timezone(void)
{
- const char *def_tz;
+ const char *def_tz;
def_tz = getenv("TZ");
if (def_tz && pg_tzset(def_tz) && tz_acceptable())
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/private.h,v 1.8 2004/05/21 20:59:10 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/private.h,v 1.9 2004/08/29 05:07:02 momjian Exp $
*/
/*
#ifndef remove
extern int unlink(const char *filename);
+
#define remove unlink
#endif /* !defined remove */
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/strftime.c,v 1.4 2004/06/03 02:08:07 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/strftime.c,v 1.5 2004/08/29 05:07:02 momjian Exp $
*/
#include "postgres.h"
/*
* x_fmt
*
- * C99 requires this format. Using just numbers (as here)
- * makes Quakers happier; it's also compatible with SVR4.
+ * C99 requires this format. Using just numbers (as here) makes Quakers
+ * happier; it's also compatible with SVR4.
*/
"%m/%d/%y",
/*
* c_fmt
*
- * C99 requires this format. Previously this code used "%D %X", but we now
- * conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by Solaris
- * 2.3.
+ * C99 requires this format. Previously this code used "%D %X", but we
+ * now conform to C99. Note that "%a %b %d %H:%M:%S %Y" is used by
+ * Solaris 2.3.
*/
"%a %b %e %T %Y",
static char *_add(const char *, char *, const char *);
static char *_conv(int, const char *, char *, const char *);
static char *_fmt(const char *, const struct pg_tm *, char *,
- const char *, int *);
+ const char *, int *);
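The x_fmt and c_fmt strings shown above can be exercised with any C99/POSIX strftime(); a minimal sketch, with illustrative outputs in the comments:

	#include <stdio.h>
	#include <time.h>

	int
	main(void)
	{
		char		buf[64];
		time_t		now = time(NULL);
		struct tm  *tm = localtime(&now);

		/* x_fmt: numbers only, e.g. "08/29/04" */
		strftime(buf, sizeof(buf), "%m/%d/%y", tm);
		printf("x_fmt: %s\n", buf);

		/* c_fmt: e.g. "Sun Aug 29 05:07:02 2004" */
		strftime(buf, sizeof(buf), "%a %b %e %T %Y", tm);
		printf("c_fmt: %s\n", buf);
		return 0;
	}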
#define IN_NONE 0
#define IN_SOME 1
size_t
pg_strftime(char *s, size_t maxsize, const char *format,
- const struct pg_tm *t)
+ const struct pg_tm * t)
{
char *p;
int warn;
case 'O':
/*
- * C99 locale modifiers. The sequences %Ec %EC
- * %Ex %EX %Ey %EY %Od %oe %OH %OI %Om %OM %OS
- * %Ou %OU %OV %Ow %OW %Oy are supposed to provide
- * alternate representations.
+ * C99 locale modifiers. The sequences %Ec %EC %Ex
+ * %EX %Ey %EY %Od %oe %OH %OI %Om %OM %OS %Ou %OU
+ * %OV %Ow %OW %Oy are supposed to provide alternate
+ * representations.
*/
goto label;
case 'e':
case 'k':
/*
- * This used to be... _conv(t->tm_hour % 12 ? t->tm_hour
- * % 12 : 12, 2, ' '); ...and has been changed to the
- * below to match SunOS 4.1.1 and Arnold Robbins' strftime
- * version 3.0. That is, "%k" and "%l" have been
- * swapped. (ado, 1993-05-24)
+ * This used to be... _conv(t->tm_hour % 12 ?
+ * t->tm_hour % 12 : 12, 2, ' '); ...and has been
+ * changed to the below to match SunOS 4.1.1 and
+ * Arnold Robbins' strftime version 3.0. That is,
+ * "%k" and "%l" have been swapped. (ado, 1993-05-24)
*/
pt = _conv(t->tm_hour, "%2d", pt, ptlim);
continue;
/*
* This used to be... _conv(t->tm_hour, 2, ' ');
- * ...and has been changed to the below to match
- * SunOS 4.1.1 and Arnold Robbin's strftime version
- * 3.0. That is, "%k" and "%l" have been swapped.
- * (ado, 1993-05-24)
+ * ...and has been changed to the below to match SunOS
+ * 4.1.1 and Arnold Robbins' strftime version 3.0.
+ * That is, "%k" and "%l" have been swapped. (ado,
+ * 1993-05-24)
*/
pt = _conv((t->tm_hour % 12) ?
(t->tm_hour % 12) : 12,
case 'u':
/*
- * From Arnold Robbins' strftime version 3.0: "ISO 8601:
- * Weekday as a decimal number [1 (Monday) - 7]"
+ * From Arnold Robbins' strftime version 3.0: "ISO
+ * 8601: Weekday as a decimal number [1 (Monday) - 7]"
* (ado, 1993-05-24)
*/
pt = _conv((t->tm_wday == 0) ?
DAYSPERWEEK) - 3;
/*
- * What yday does the NEXT ISO year begin
- * on?
+ * What yday does the NEXT ISO year begin on?
*/
top = bot -
(len % DAYSPERWEEK);
case 'v':
/*
- * From Arnold Robbins' strftime version 3.0:
- * "date as dd-bbb-YYYY" (ado, 1993-05-24)
+ * From Arnold Robbins' strftime version 3.0: "date as
+ * dd-bbb-YYYY" (ado, 1993-05-24)
*/
pt = _fmt("%e-%b-%Y", t, pt, ptlim, warnp);
continue;
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/timezone/zic.c,v 1.11 2004/08/11 16:53:28 tgl Exp $
+ * $PostgreSQL: pgsql/src/timezone/zic.c,v 1.12 2004/08/29 05:07:02 momjian Exp $
*/
#include "postgres.h"
static struct attype
{
- pg_time_t at;
+ pg_time_t at;
unsigned char type;
} attypes[TZ_MAX_TIMES];
static long gmtoffs[TZ_MAX_TYPES];
error(const char *string)
{
/*
- * Match the format of "cc" to allow sh users to zic ... 2>&1 |
- * error -t "*" -v on BSD systems.
+ * Match the format of "cc" to allow sh users to zic ... 2>&1 | error
+ * -t "*" -v on BSD systems.
*/
(void) fprintf(stderr, _("\"%s\", line %d: %s"),
filename, linenum, string);
* pg_time_t is always signed, but might be only 32 bits ...
*/
min_time = ~(pg_time_t) 0;
- min_time <<= TYPE_BIT(pg_time_t) - 1;
+ min_time <<= TYPE_BIT(pg_time_t) -1;
max_time = ~(pg_time_t) 0 - min_time;
/*
- * For the moment, hard-wire the range as 1901 to 2038. We cannot
- * go wider without adopting an incompatible zone file format, which
- * is a step I'd just as soon not take just yet.
+ * For the moment, hard-wire the range as 1901 to 2038. We cannot go
+ * wider without adopting an incompatible zone file format, which is a
+ * step I'd just as soon not take just yet.
*/
min_time = Max(min_time, (pg_time_t) INT_MIN);
max_time = Min(max_time, (pg_time_t) INT_MAX);
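The clamp to INT_MIN/INT_MAX reproduces the familiar 32-bit time_t limits; a quick sketch with gmtime() (assuming a platform where these values are representable) shows the 1901/2038 endpoints the comment refers to:

	#include <limits.h>
	#include <stdio.h>
	#include <time.h>

	int
	main(void)
	{
		char		buf[64];
		time_t		lo = (time_t) INT_MIN;	/* -2147483648 */
		time_t		hi = (time_t) INT_MAX;	/* 2147483647 */

		strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&lo));
		printf("INT_MIN -> %s UTC\n", buf);	/* 1901-12-13 20:45:52 */
		strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", gmtime(&hi));
		printf("INT_MAX -> %s UTC\n", buf);	/* 2038-01-19 03:14:07 */
		return 0;
	}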
TRUE);
/*
- * Note, though, that if there's no rule, a '%s' in the
- * format is a bad thing.
+ * Note, though, that if there's no rule, a '%s' in the format
+ * is a bad thing.
*/
if (strchr(zp->z_format, '%') != 0)
error(_("%s in ruleless zone"));
zones[nzones++] = z;
/*
- * If there was an UNTIL field on this line, there's more
- * information about the zone on the next line.
+ * If there was an UNTIL field on this line, there's more information
+ * about the zone on the next line.
*/
return hasuntil;
}
day;
long dayoff,
tod;
- pg_time_t t;
+ pg_time_t t;
if (nfields != LEAP_FIELDS)
{
min_year = rp->r_loyear;
/*
- * Day work. Accept things such as: 1 last-Sunday Sun<=20 Sun>=7
+ * Day work. Accept things such as: 1 last-Sunday Sun<=20 Sun>=7
*/
dp = ecpyalloc(dayp);
if ((lp = byword(dp, lasts)) != NULL)
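For context, the day field of a rule can name an exact day ("1"), the last given weekday of the month ("last-Sunday"), or the first/last given weekday on or after/before a date ("Sun>=7", "Sun<=20"). A minimal sketch of resolving the "Sun>=7" form with mktime(), using a hypothetical first_wday_on_or_after() helper (zic itself computes this from its own calendar tables rather than mktime()):

	#include <stdio.h>
	#include <time.h>

	/*
	 * Hypothetical helper: smallest day-of-month >= 'day' that falls on
	 * weekday 'wday' (0 = Sunday), for the given year and month (1-12).
	 */
	static int
	first_wday_on_or_after(int year, int month, int day, int wday)
	{
		struct tm	tm = {0};

		tm.tm_year = year - 1900;
		tm.tm_mon = month - 1;
		tm.tm_mday = day;
		tm.tm_hour = 12;			/* keep clear of DST transitions */
		tm.tm_isdst = -1;
		mktime(&tm);				/* normalizes and fills in tm_wday */
		return day + (wday - tm.tm_wday + 7) % 7;
	}

	int
	main(void)
	{
		/* "Sun>=7" in March 2004: first Sunday on or after the 7th */
		printf("Sun>=7 resolves to March %d, 2004\n",
			   first_wday_on_or_after(2004, 3, 7, 0));	/* prints 7 */
		return 0;
	}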
j;
static char *fullname;
static struct tzhead tzh;
- pg_time_t ats[TZ_MAX_TIMES];
+ pg_time_t ats[TZ_MAX_TIMES];
unsigned char types[TZ_MAX_TIMES];
/*
charcnt = 0;
/*
- * need to unconditionally initialize startttisstd.
+ * to unconditionally initialize startttisstd.
*/
startttisstd = FALSE;
startttisgmt = FALSE;
break;
/*
- * Mark which rules to do in the current year. For
- * those to do, calculate rpytime(rp, year);
+ * Mark which rules to do in the current year. For those
+ * to do, calculate rpytime(rp, year);
*/
for (j = 0; j < zp->z_nrules; ++j)
{
if (useuntil)
{
/*
- * Turn untiltime into UTC assuming the
- * current gmtoff and stdoff values.
+ * Turn untiltime into UTC assuming the current
+ * gmtoff and stdoff values.
*/
untiltime = zp->z_untiltime;
if (!zp->z_untilrule.r_todisgmt)
}
/*
- * Find the rule (of those to do, if any) that
- * takes effect earliest in the year.
+ * Find the rule (of those to do, if any) that takes
+ * effect earliest in the year.
*/
k = -1;
for (j = 0; j < zp->z_nrules; ++j)
}
/*
- * There isn't one; add a new one, unless there are already too
- * many.
+ * There isn't one; add a new one, unless there are already too many.
*/
if (typecnt >= TZ_MAX_TYPES)
{
if (!itsdir(name))
{
/*
- * It doesn't seem to exist, so we try to create it.
- * Creation may fail because of the directory being created
- * by some other multiprocessor, so we get to do extra
- * checking.
+ * It doesn't seem to exist, so we try to create it. Creation
+ * may fail because of the directory being created by some
+ * other multiprocessor, so we get to do extra checking.
*/
if (mkdir(name, MKDIR_UMASK) != 0)
{
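A minimal standalone sketch of the create-then-recheck pattern described in the comment above, using errno == EEXIST in place of the itsdir() recheck (a simplification: a plain file of the same name would also report EEXIST):

	#include <errno.h>
	#include <sys/stat.h>
	#include <sys/types.h>

	/* Create a directory, tolerating a concurrent creator; 0 on success. */
	static int
	ensure_dir(const char *name)
	{
		if (mkdir(name, 0755) != 0)
		{
			/*
			 * mkdir() can fail simply because another process created the
			 * directory between our check and our call; treat EEXIST as
			 * success and anything else as a real error.
			 */
			if (errno != EEXIST)
				return -1;
		}
		return 0;
	}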
#endif
#endif
-#define WAL_FILE_SIZE (16 * 1024 * 1024)
+#define WAL_FILE_SIZE (16 * 1024 * 1024)
void die(char *str);
void print_elapse(struct timeval start_t, struct timeval elapse_t);
struct timeval elapse_t;
int tmpfile,
i,
- loops=1000;
+ loops = 1000;
char *strout = (char *) malloc(WAL_FILE_SIZE);
char *filename = FSYNC_FILENAME;
- if (argc > 2 && strcmp(argv[1],"-f") == 0)
+ if (argc > 2 && strcmp(argv[1], "-f") == 0)
{
filename = argv[2];
argv += 2;
argc -= 2;
}
-
+
if (argc > 1)
- loops = atoi(argv[1]);
-
+ loops = atoi(argv[1]);
+
for (i = 0; i < WAL_FILE_SIZE; i++)
strout[i] = 'a';
* Portions Copyright (c) 1996-2004, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
- * $PostgreSQL: pgsql/src/tools/thread/thread_test.c,v 1.32 2004/08/29 04:13:13 momjian Exp $
+ * $PostgreSQL: pgsql/src/tools/thread/thread_test.c,v 1.33 2004/08/29 05:07:03 momjian Exp $
*
* This program tests to see if your standard libc functions use
* pthread_setspecific()/pthread_getspecific() to be thread-safe.
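The underlying technique is to let a second thread call the same libc function with a different argument and then check whether the first thread's result buffer was overwritten; a minimal sketch for strerror(), one of the functions of interest (compile with -pthread):

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>
	#include <string.h>

	static void *
	other_thread(void *arg)
	{
		(void) arg;
		/* Call strerror() with a different errno from another thread. */
		strerror(EINVAL);
		return NULL;
	}

	int
	main(void)
	{
		pthread_t	th;
		char		saved[256];
		char	   *p;

		/* Fetch a message in the main thread and remember its text. */
		p = strerror(EACCES);
		strncpy(saved, p, sizeof(saved) - 1);
		saved[sizeof(saved) - 1] = '\0';

		pthread_create(&th, NULL, other_thread, NULL);
		pthread_join(th, NULL);

		/*
		 * If the other thread's call overwrote the buffer we were handed,
		 * strerror() shares one static buffer across threads.
		 */
		printf("strerror() looks %sthread-safe\n",
			   strcmp(saved, p) == 0 ? "" : "NOT ");
		return 0;
	}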
void func_call_1(void);
void func_call_2(void);
-#define TEMP_FILENAME_1 "/tmp/thread_test.1.XXXXXX"
-#define TEMP_FILENAME_2 "/tmp/thread_test.2.XXXXXX"
+#define TEMP_FILENAME_1 "/tmp/thread_test.1.XXXXXX"
+#define TEMP_FILENAME_2 "/tmp/thread_test.2.XXXXXX"
char *temp_filename_1;
char *temp_filename_2;
pthread_t thread1,
thread2;
int fd;
-
+
if (argc > 1)
{
fprintf(stderr, "Usage: %s\n", argv[0]);
close(1);
dup(5);
#endif
-
+
/* Make temp filenames, might not have strdup() */
temp_filename_1 = malloc(strlen(TEMP_FILENAME_1) + 1);
strcpy(temp_filename_1, TEMP_FILENAME_1);
strcpy(temp_filename_2, TEMP_FILENAME_2);
fd = mkstemp(temp_filename_2);
close(fd);
-
+
#if !defined(HAVE_GETADDRINFO) && !defined(HAVE_GETHOSTBYNAME_R)
if (gethostname(myhostname, MAXHOSTNAMELEN) != 0)
{
pthread_mutex_lock(&init_mutex); /* wait for parent to test */
pthread_mutex_unlock(&init_mutex);
}
-#endif /* !ENABLE_THREAD_SAFETY && !IN_CONFIGURE */
+
+#endif /* !ENABLE_THREAD_SAFETY && !IN_CONFIGURE */
* the same signature as far as C is concerned. We provide these prototypes
* just to forestall warnings when compiled with gcc -Wmissing-prototypes.
*/
-Datum complex_in(PG_FUNCTION_ARGS);
-Datum complex_out(PG_FUNCTION_ARGS);
-Datum complex_recv(PG_FUNCTION_ARGS);
-Datum complex_send(PG_FUNCTION_ARGS);
-Datum complex_add(PG_FUNCTION_ARGS);
-Datum complex_abs_lt(PG_FUNCTION_ARGS);
-Datum complex_abs_le(PG_FUNCTION_ARGS);
-Datum complex_abs_eq(PG_FUNCTION_ARGS);
-Datum complex_abs_ge(PG_FUNCTION_ARGS);
-Datum complex_abs_gt(PG_FUNCTION_ARGS);
-Datum complex_abs_cmp(PG_FUNCTION_ARGS);
+Datum complex_in(PG_FUNCTION_ARGS);
+Datum complex_out(PG_FUNCTION_ARGS);
+Datum complex_recv(PG_FUNCTION_ARGS);
+Datum complex_send(PG_FUNCTION_ARGS);
+Datum complex_add(PG_FUNCTION_ARGS);
+Datum complex_abs_lt(PG_FUNCTION_ARGS);
+Datum complex_abs_le(PG_FUNCTION_ARGS);
+Datum complex_abs_eq(PG_FUNCTION_ARGS);
+Datum complex_abs_ge(PG_FUNCTION_ARGS);
+Datum complex_abs_gt(PG_FUNCTION_ARGS);
+Datum complex_abs_cmp(PG_FUNCTION_ARGS);
/*****************************************************************************
Datum
complex_out(PG_FUNCTION_ARGS)
{
- Complex *complex = (Complex *) PG_GETARG_POINTER(0);
+ Complex *complex = (Complex *) PG_GETARG_POINTER(0);
char *result;
result = (char *) palloc(100);
Datum
complex_send(PG_FUNCTION_ARGS)
{
- Complex *complex = (Complex *) PG_GETARG_POINTER(0);
+ Complex *complex = (Complex *) PG_GETARG_POINTER(0);
StringInfoData buf;
pq_begintypsend(&buf);
Datum
complex_add(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
Complex *result;
result = (Complex *) palloc(sizeof(Complex));
* It's essential that the comparison operators and support function for a
* B-tree index opclass always agree on the relative ordering of any two
* data values. Experience has shown that it's depressingly easy to write
- * unintentionally inconsistent functions. One way to reduce the odds of
+ * unintentionally inconsistent functions. One way to reduce the odds of
* making a mistake is to make all the functions simple wrappers around
* an internal three-way-comparison function, as we do here.
*****************************************************************************/
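The same discipline can be shown outside the fmgr conventions in a few lines of plain C: one three-way comparison is the single source of truth, and every boolean operator merely re-expresses its result, so they cannot drift out of agreement (a sketch for plain doubles, not part of the module below):

	#include <stdio.h>

	/* One source of truth for the ordering ... */
	static int
	cmp_double(double a, double b)
	{
		if (a < b)
			return -1;
		if (a > b)
			return 1;
		return 0;
	}

	/* ... and boolean "operators" that merely re-express its result. */
	static int
	lt_double(double a, double b)
	{
		return cmp_double(a, b) < 0;
	}

	static int
	ge_double(double a, double b)
	{
		return cmp_double(a, b) >= 0;
	}

	int
	main(void)
	{
		printf("%d %d\n", lt_double(1.0, 2.0), ge_double(1.0, 2.0));	/* 1 0 */
		return 0;
	}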
#define Mag(c) ((c)->x*(c)->x + (c)->y*(c)->y)
static int
-complex_abs_cmp_internal(Complex *a, Complex *b)
+complex_abs_cmp_internal(Complex * a, Complex * b)
{
double amag = Mag(a),
bmag = Mag(b);
Datum
complex_abs_lt(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) < 0);
}
Datum
complex_abs_le(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) <= 0);
}
Datum
complex_abs_eq(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) == 0);
}
Datum
complex_abs_ge(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) >= 0);
}
Datum
complex_abs_gt(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_BOOL(complex_abs_cmp_internal(a, b) > 0);
}
Datum
complex_abs_cmp(PG_FUNCTION_ARGS)
{
- Complex *a = (Complex *) PG_GETARG_POINTER(0);
- Complex *b = (Complex *) PG_GETARG_POINTER(1);
+ Complex *a = (Complex *) PG_GETARG_POINTER(0);
+ Complex *b = (Complex *) PG_GETARG_POINTER(1);
PG_RETURN_INT32(complex_abs_cmp_internal(a, b));
}